{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T09:57:12Z","timestamp":1774605432195,"version":"3.50.1"},"reference-count":44,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T00:00:00Z","timestamp":1757376000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T00:00:00Z","timestamp":1757376000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/100014718","name":"Innovative Research Group Project of the National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U2003207"],"award-info":[{"award-number":["U2003207"]}],"id":[{"id":"10.13039\/100014718","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62201002"],"award-info":[{"award-number":["62201002"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61902064"],"award-info":[{"award-number":["61902064"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Natural Science Foundation of Anhui Province","doi-asserted-by":"crossref","award":["1908085MF209"],"award-info":[{"award-number":["1908085MF209"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/100017128","name":"Science Fund for Distinguished Young Scholars of Anhui Province","doi-asserted-by":"publisher","award":["KJ2018A0018"],"award-info":[{"award-number":["KJ2018A0018"]}],"id":[{"id":"10.13039\/100017128","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Circuits Syst Signal Process"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s00034-025-03276-6","type":"journal-article","created":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T10:32:52Z","timestamp":1757413972000},"page":"1979-1999","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Multi-path Processing Structure for Multi-scale Feature Fusion in Speech Separation Transformer"],"prefix":"10.1007","volume":"45","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6509-5520","authenticated-orcid":false,"given":"Jian","family":"Zhou","sequence":"first","affiliation":[]},{"given":"Yinhao","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Cunhang","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Tao","sequence":"additional","affiliation":[]},{"given":"Zhao","family":"Lv","sequence":"additional","affiliation":[]},{"given":"Hon Keung","family":"Kwan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,9]]},"reference":[{"key":"3276_CR1","doi-asserted-by":"publisher","first-page":"1083","DOI":"10.1007\/s00034-022-02166-5","volume":"42","author":"B Bhattarai","year":"2023","unstructured":"B. Bhattarai, Y.R. 
Pandeya, Y. Jie, A.K. Lamichhane, J. Lee, High-resolution representation learning and recurrent neural network for singing voice separation. Circuits Syst. Signal Process 42, 1083\u20131104 (2023)","journal-title":"Circuits Syst. Signal Process"},{"key":"3276_CR2","doi-asserted-by":"crossref","unstructured":"J. Chen, Q. Mao, D. Liu, Dual-path transformer network: Direct context-aware modeling for end-to-end monaural speech separation, in INTERSPEECH (2020)","DOI":"10.21437\/Interspeech.2020-2205"},{"key":"3276_CR3","unstructured":"J. Devlin, M.W. Chang, K. Lee, K. Toutanova, Bert: pre-training of deep bidirectional transformers for language understanding. ArXiv\u00a0abs\/1810.04805 (2019)"},{"key":"3276_CR4","doi-asserted-by":"publisher","first-page":"243","DOI":"10.1016\/j.neucom.2021.05.097","volume":"456","author":"J Gao","year":"2021","unstructured":"J. Gao, L. Qing, L. Li, Y. Cheng, Y. Peng, Multi-scale features based interpersonal relation recognition using higher-order graph neural network. Neurocomputing 456, 243\u2013252 (2021)","journal-title":"Neurocomputing"},{"key":"3276_CR5","doi-asserted-by":"publisher","first-page":"1875","DOI":"10.1162\/0899766054322964","volume":"17","author":"S Haykin","year":"2005","unstructured":"S. Haykin, Z. Chen, The cocktail party problem. Neural Comput. 17, 1875\u20131902 (2005)","journal-title":"Neural Comput."},{"key":"3276_CR6","doi-asserted-by":"crossref","unstructured":"J.R. Hershey, Z. Chen, J.L. Roux, S. Watanabe, Deep clustering: discriminative embeddings for segmentation and separation, in 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 31\u201335 (2016)","DOI":"10.1109\/ICASSP.2016.7471631"},{"key":"3276_CR7","first-page":"22509","volume":"34","author":"X Hu","year":"2021","unstructured":"X. Hu, K. Li, W. Zhang, Y. Luo, J.M. Lemercier, T. Gerkmann, Speech separation using an asynchronous fully recurrent convolutional neural network. Adv. Neural. Inf. Process. Syst. 34, 22509\u201322522 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"3276_CR8","doi-asserted-by":"crossref","unstructured":"Y.Z. Isik, J.L. Roux, Z. Chen, S. Watanabe, J.R. Hershey, Single-channel multi-speaker separation using deep clustering, in INTERSPEECH (2016)","DOI":"10.21437\/Interspeech.2016-1176"},{"key":"3276_CR9","doi-asserted-by":"crossref","unstructured":"B. Kadioglu, M. Horgan, X. Liu, J. Pons, D. Darcy, V. Kumar, An empirical study of conv-tasnet, in ICASSP 2020\u20142020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7264\u20137268 (2020)","DOI":"10.1109\/ICASSP40776.2020.9054721"},{"key":"3276_CR10","doi-asserted-by":"publisher","first-page":"1901","DOI":"10.1109\/TASLP.2017.2726762","volume":"25","author":"M Kolbaek","year":"2017","unstructured":"M. Kolbaek, D. Yu, Z. Tan, J.H. Jensen, Multitalker speech separation with utterance-level permutation invariant training of deep recurrent neural networks. IEEE\/ACM Trans. Audio Speech Lang. Process. 25, 1901\u20131913 (2017)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3276_CR11","doi-asserted-by":"crossref","unstructured":"M.W.Y. Lam, J.\u00a0Wang, D.\u00a0Su, D.\u00a0Yu, Effective low-cost time-domain audio separation using globally attentive locally recurrent networks, in 2021 IEEE Spoken Language Technology Workshop (SLT), pp. 801\u2013808 (2021)","DOI":"10.1109\/SLT48900.2021.9383464"},{"key":"3276_CR12","doi-asserted-by":"crossref","unstructured":"M.W.Y. Lam, J. Wang, D. Su, D. 
Yu, Sandglasset: a light multi-granularity self-attentive network for time-domain speech separation, in ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5759\u20135763 (2021)","DOI":"10.1109\/ICASSP39728.2021.9413837"},{"key":"3276_CR13","doi-asserted-by":"crossref","unstructured":"K. Li, S. Wang, X. Zhang, Y. Xu, W. Xu, Z. Tu, Pose recognition with cascade transformers, in 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1944\u20131953 (2021)","DOI":"10.1109\/CVPR46437.2021.00198"},{"key":"3276_CR14","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00034-023-02383-6","volume":"42","author":"L Li","year":"2023","unstructured":"L. Li, M. Jia, J. Liu, T.W. Pai, 05. Separation of multiple speech sources in reverberant environments based on sparse component enhancement. Circuits Syst. Signal Process. 42, 1\u201328 (2023). https:\/\/doi.org\/10.1007\/s00034-023-02383-6","journal-title":"Circuits Syst. Signal Process."},{"key":"3276_CR15","unstructured":"H. Lin, X.\u00a0Cheng, X.\u00a0Wu, F.\u00a0Yang, D.\u00a0Shen, Z.\u00a0Wang, Q.\u00a0Song, W.\u00a0Yuan, Cat: cross attention in vision transformer. ArXiv\u00a0abs\/2106.05786 (2021)"},{"key":"3276_CR16","doi-asserted-by":"crossref","unstructured":"D. Liu, Y.\u00a0Cui, L.\u00a0Yan, C.\u00a0Mousas, B.\u00a0Yang, Y.\u00a0Chen, Densernet: weakly supervised visual localization using multi-scale feature aggregation. ArXivabs\/2012.02366 (2021)","DOI":"10.1609\/aaai.v35i7.16760"},{"key":"3276_CR17","doi-asserted-by":"publisher","first-page":"4621","DOI":"10.3390\/rs13224621","volume":"13","author":"D Liu","year":"2021","unstructured":"D. Liu, G. Han, P. Liu, H. Yang, X. Sun, Q. Li, J. Wu, A novel 2d\u20133d cnn with spectral-spatial multi-scale feature fusion for hyperspectral image classification. Remote. Sens. 13, 4621 (2021)","journal-title":"Remote. Sens."},{"key":"3276_CR18","doi-asserted-by":"publisher","first-page":"2092","DOI":"10.1109\/TASLP.2019.2941148","volume":"27","author":"Y Liu","year":"2019","unstructured":"Y. Liu, D. Wang, Divide and conquer: a deep casa approach to talker-independent monaural speaker separation. IEEE\/ACM Trans. Audio Speech Lang. Process. 27, 2092\u20132102 (2019)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3276_CR19","doi-asserted-by":"crossref","unstructured":"F. Lu, Y. Yang, A feature aggregation hourglass network for human pose estimation, in 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20136 (2021)","DOI":"10.1109\/IJCNN52387.2021.9533871"},{"key":"3276_CR20","doi-asserted-by":"crossref","unstructured":"Y. Luo, Z.\u00a0Chen, T.\u00a0Yoshioka, Dual-path rnn: efficient long sequence modeling for time-domain single-channel speech separation, in ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 46\u201350 (2020)","DOI":"10.1109\/ICASSP40776.2020.9054266"},{"key":"3276_CR21","doi-asserted-by":"crossref","unstructured":"Y. Luo, N.\u00a0Mesgarani, Tasnet: Time-domain audio separation network for real-time, single-channel speech separation, in 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 696\u2013700 (2018)","DOI":"10.1109\/ICASSP.2018.8462116"},{"key":"3276_CR22","doi-asserted-by":"publisher","first-page":"1256","DOI":"10.1109\/TASLP.2019.2915167","volume":"27","author":"Y Luo","year":"2019","unstructured":"Y. Luo, N. 
Mesgarani, Conv-tasnet: surpassing ideal time\u2013frequency magnitude masking for speech separation. IEEE\/ACM Trans. Audio Speech Lang. Process. 27, 1256\u20131266 (2019)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3276_CR23","doi-asserted-by":"crossref","unstructured":"S. Lutati, E.\u00a0Nachmani, L.\u00a0Wolf, Sepit approaching a single channel speech separation bound. arXiv preprint arXiv:2205.11801 (2022)","DOI":"10.21437\/Interspeech.2022-149"},{"key":"3276_CR24","unstructured":"E. Nachmani, Y.\u00a0Adi, L.\u00a0Wolf, Voice separation with an unknown number of multiple speakers, in ICML (2020)"},{"key":"3276_CR25","doi-asserted-by":"crossref","unstructured":"A. Newell, K.\u00a0Yang, J.\u00a0Deng, Stacked hourglass networks for human pose estimation, in ECCV (2016)","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"3276_CR26","unstructured":"M. Ravanelli, T.\u00a0Parcollet, P.\u00a0Plantinga, A.\u00a0Rouhe, S.\u00a0Cornell, L.\u00a0Lugosch, C.\u00a0Subakan, N.\u00a0Dawalatabad, A.\u00a0Heba, J.\u00a0Zhong, J.C. Chou, S.L. Yeh, S.W. Fu, C.F. Liao, E.\u00a0Rastorgueva, F.\u00a0Grondin, W.\u00a0Aris, H.\u00a0Na, Y.\u00a0Gao, R.D. Mori, Y.\u00a0Bengio, SpeechBrain: a general-purpose speech toolkit. arXiv:2106.04624 (2021)"},{"key":"3276_CR27","doi-asserted-by":"crossref","unstructured":"J. Rixen, M.\u00a0Renz, Sfsrnet: super-resolution for single-channel audio source separation, in Proceedings of the AAAI Conference on Artificial Intelligence, 11220\u201311228 (2022)","DOI":"10.1609\/aaai.v36i10.21372"},{"key":"3276_CR28","doi-asserted-by":"crossref","unstructured":"Z. Shi, H.\u00a0Lin, L.\u00a0Liu, R.\u00a0Liu, J.\u00a0Han, Furcanext: end-to-end monaural speech separation with dynamic gated dilated temporal convolutional networks, in MMM (2020)","DOI":"10.21437\/Interspeech.2019-1292"},{"key":"3276_CR29","unstructured":"Z. Shi, R.\u00a0Liu, J. Han, Lafurca: iterative refined speech separation based on context-aware dual-path parallel bi-lstm. arXiv preprint arXiv:2001.08998 (2020)"},{"key":"3276_CR30","doi-asserted-by":"publisher","first-page":"4242","DOI":"10.1109\/ACCESS.2023.3235010","volume":"11","author":"S Soni","year":"2023","unstructured":"S. Soni, R.N. Yadav, L. Gupta, State-of-the-art analysis of deep learning-based monaural speech source separation techniques. IEEE Access 11, 4242\u20134269 (2023). https:\/\/doi.org\/10.1109\/ACCESS.2023.3235010","journal-title":"IEEE Access"},{"key":"3276_CR31","doi-asserted-by":"crossref","unstructured":"A. Srinivas, T.Y. Lin, N.\u00a0Parmar, J.\u00a0Shlens, P.\u00a0Abbeel, A.\u00a0Vaswani, Bottleneck transformers for visual recognition, in 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 16514\u201316524 (2021)","DOI":"10.1109\/CVPR46437.2021.01625"},{"key":"3276_CR32","doi-asserted-by":"crossref","unstructured":"C. Subakan, M.\u00a0Ravanelli, S.\u00a0Cornell, M.\u00a0Bronzi, J.\u00a0Zhong, Attention is all you need in speech separation, in ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 21\u201325 (2021)","DOI":"10.1109\/ICASSP39728.2021.9413901"},{"key":"3276_CR33","doi-asserted-by":"publisher","first-page":"34170","DOI":"10.1109\/ACCESS.2022.3162608","volume":"10","author":"RK Thakur","year":"2022","unstructured":"R.K. Thakur, S.K. Maji, Gradient and multi scale feature inspired deep blind gaussian denoiser. 
IEEE Access 10, 34170\u201334184 (2022)","journal-title":"IEEE Access"},{"key":"3276_CR34","doi-asserted-by":"crossref","unstructured":"E. Tzinis, Z.\u00a0Wang, P.\u00a0Smaragdis, Sudo rm-rf: Efficient networks for universal audio source separation, in 2020 IEEE 30th International Workshop on Machine Learning for Signal Processing (MLSP), pp. 1\u20136 (2020)","DOI":"10.1109\/MLSP49062.2020.9231900"},{"key":"3276_CR35","unstructured":"A. Vaswani, N.M. Shazeer, N.\u00a0Parmar, J.\u00a0Uszkoreit, L.\u00a0Jones, A.N. Gomez, L.\u00a0Kaiser, I.\u00a0Polosukhin, Attention is all you need. ArXivabs\/1706.03762 (2017)"},{"key":"3276_CR36","doi-asserted-by":"publisher","first-page":"1702","DOI":"10.1109\/TASLP.2018.2842159","volume":"26","author":"D Wang","year":"2018","unstructured":"D. Wang, J. Chen, Supervised speech separation based on deep learning: an overview. IEEE\/ACM Trans. Audio Speech Lang. Process. 26, 1702\u20131726 (2018)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3276_CR37","doi-asserted-by":"crossref","unstructured":"Z.Q. Wang, J.L. Roux, J.R. Hershey, Alternative objective functions for deep clustering, in 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 686\u2013690 (2018)","DOI":"10.1109\/ICASSP.2018.8462507"},{"issue":"4","key":"3276_CR38","doi-asserted-by":"publisher","first-page":"2428","DOI":"10.1007\/s00034-023-02566-1","volume":"43","author":"S Wei","year":"2024","unstructured":"S. Wei, R. Zhang, Underdetermined blind source separation based on spatial estimation and compressed sensing. Circuits Syst. Signal Process. 43(4), 2428\u20132453 (2024)","journal-title":"Circuits Syst. Signal Process."},{"key":"3276_CR39","doi-asserted-by":"publisher","first-page":"678","DOI":"10.1109\/LSP.2021.3067498","volume":"28","author":"X Xu","year":"2021","unstructured":"X. Xu, Z. Chen, F. Yin, Monocular depth estimation with multi-scale feature fusion. IEEE Signal Process. Lett. 28, 678\u2013682 (2021)","journal-title":"IEEE Signal Process. Lett."},{"key":"3276_CR40","doi-asserted-by":"crossref","unstructured":"D. Yu, M.\u00a0Kolb\u00e6k, Z.\u00a0Tan, J.H. Jensen, Permutation invariant training of deep models for speaker-independent multi-talker speech separation, in 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 241\u2013245 (2017)","DOI":"10.1109\/ICASSP.2017.7952154"},{"key":"3276_CR41","doi-asserted-by":"crossref","unstructured":"W. Yu, J. Zhou, H. bin Wang, L. Tao, Setransformer: speech enhancement transformer. Cogn. Comput. 14, 1152\u20131158 (2022)","DOI":"10.1007\/s12559-020-09817-2"},{"key":"3276_CR42","unstructured":"Y. Yuan, R.\u00a0Fu, L.\u00a0Huang, W.\u00a0Lin, C.\u00a0Zhang, X.\u00a0Chen, J.\u00a0Wang, Hrformer: High-resolution transformer for dense prediction. ArXivabs\/2110.09408 (2021)"},{"key":"3276_CR43","doi-asserted-by":"publisher","first-page":"2840","DOI":"10.1109\/TASLP.2021.3099291","volume":"29","author":"N Zeghidour","year":"2021","unstructured":"N. Zeghidour, D. Grangier, Wavesplit: end-to-end speech separation by speaker clustering. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 2840\u20132849 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3276_CR44","doi-asserted-by":"crossref","unstructured":"Y. 
Zhao, C.\u00a0Luo, Z.\u00a0Zha, W.\u00a0Zeng, Multi-scale group transformer for long sequence modeling in speech separation, in IJCAI (2020)","DOI":"10.24963\/ijcai.2020\/450"}],"container-title":["Circuits, Systems, and Signal Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-025-03276-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00034-025-03276-6","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-025-03276-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T09:14:05Z","timestamp":1774602845000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00034-025-03276-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,9]]},"references-count":44,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["3276"],"URL":"https:\/\/doi.org\/10.1007\/s00034-025-03276-6","relation":{},"ISSN":["0278-081X","1531-5878"],"issn-type":[{"value":"0278-081X","type":"print"},{"value":"1531-5878","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,9]]},"assertion":[{"value":"6 August 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 July 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 July 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 September 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that there are no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}