{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T09:10:17Z","timestamp":1774602617415,"version":"3.50.1"},"reference-count":83,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:00:00Z","timestamp":1770336000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:00:00Z","timestamp":1770336000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s11263-025-02689-9","type":"journal-article","created":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T03:44:24Z","timestamp":1770349464000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["High-Quality Sound Separation Across Diverse Categories via Visually-Guided Generative Modeling"],"prefix":"10.1007","volume":"134","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1469-1020","authenticated-orcid":false,"given":"Chao","family":"Huang","sequence":"first","affiliation":[]},{"given":"Susan","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Yapeng","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Anurag","family":"Kumar","sequence":"additional","affiliation":[]},{"given":"Chenliang","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,6]]},"reference":[{"key":"2689_CR1","doi-asserted-by":"crossref","unstructured":"Afouras, T., Owens, A., Chung, J.\u00a0S., & Zisserman, A. (2020). Self-supervised learning of audio-visual objects from video. In European Conference on Computer Vision, pages 208\u2013224. Springer. Springer.","DOI":"10.1007\/978-3-030-58523-5_13"},{"key":"2689_CR2","unstructured":"Amit, T., Shaharbany, T., Nachmani, E., & Wolf, L. (2021). Segdiff: Image segmentation with diffusion probabilistic models. arXiv:2112.00390."},{"key":"2689_CR3","first-page":"17981","volume":"34","author":"J Austin","year":"2021","unstructured":"Austin, J., Johnson, D. D., Ho, J., Tarlow, D., & van den Berg, R. (2021). Structured denoising diffusion models in discrete state-spaces. Advances in Neural Information Processing Systems,34, 17981\u201317993.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR4","doi-asserted-by":"crossref","unstructured":"Avrahami, O., Lischinski, D., & Fried, O. (2022). Blended diffusion for text-driven editing of natural images. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 18208\u201318218.","DOI":"10.1109\/CVPR52688.2022.01767"},{"key":"2689_CR5","unstructured":"Baranchuk, D., Rubachev, I., Voynov, A., Khrulkov, V., & Babenko, A. (2021). Label-efficient semantic segmentation with diffusion models. arXiv:2112.03126."},{"key":"2689_CR6","doi-asserted-by":"crossref","unstructured":"Brempong, E.\u00a0A., Kornblith, S., Chen, T., Parmar, N., Minderer, M., & Norouzi, M. (2022). Denoising pretraining for semantic segmentation. 
In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 4175\u20134186.","DOI":"10.1109\/CVPRW56347.2022.00462"},{"key":"2689_CR7","doi-asserted-by":"crossref","unstructured":"Chatterjee, M., Ahuja, N., & Cherian, A. (2022). Learning audio-visual dynamics using scene graphs for audio source separation. In NeurIPS.","DOI":"10.1109\/ICCV48922.2021.00124"},{"key":"2689_CR8","doi-asserted-by":"crossref","unstructured":"Chatterjee, M., Le\u00a0Roux, J., Ahuja, N., & Cherian, A. (2021). Visual scene graphs for audio source separation. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 1204\u20131213.","DOI":"10.1109\/ICCV48922.2021.00124"},{"key":"2689_CR9","first-page":"721","volume-title":"Vggsound: A large-scale audio-visual dataset","author":"H Chen","year":"2020","unstructured":"Chen, H., Xie, W., Vedaldi, A., & Zisserman, A. (2020). Vggsound: A large-scale audio-visual dataset (pp. 721\u2013725). IEEE."},{"key":"2689_CR10","doi-asserted-by":"crossref","unstructured":"Chen, J., Zhang, R., Lian, D., Yang, J., Zeng, Z., & Shi, J. (2023). iquery: Instruments as queries for audio-visual sound separation. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 14675\u201314686.","DOI":"10.1109\/CVPR52729.2023.01410"},{"key":"2689_CR11","unstructured":"Chen, N., Zhang, Y., Zen, H., Weiss, R.\u00a0J., Norouzi, M., & Chan, W. (2020b). Wavegrad: Estimating gradients for waveform generation."},{"key":"2689_CR12","doi-asserted-by":"crossref","unstructured":"Chen, S., Sun, P., Song, Y., & Luo, P. (2022a). Diffusiondet: Diffusion model for object detection.","DOI":"10.1109\/ICCV51070.2023.01816"},{"key":"2689_CR13","unstructured":"Chen, T., Zhang, R., & Hinton, G. (2022b). Analog bits: Generating discrete data using diffusion models with self-conditioning. arXiv preprint arXiv:2208.04202."},{"key":"2689_CR14","doi-asserted-by":"crossref","unstructured":"Chou, J.-C., Chien, C.-M., & Livescu, K. (2023). Av2wav: Diffusion-based re-synthesis from continuous self-supervised features for audio-visual speech enhancement. arXiv preprint arXiv:2309.08030.","DOI":"10.1109\/ICASSP48485.2024.10446625"},{"key":"2689_CR15","first-page":"8780","volume":"34","author":"P Dhariwal","year":"2021","unstructured":"Dhariwal, P., & Nichol, A. (2021). Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems,34, 8780\u20138794.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR16","unstructured":"Dong, H.-W., Takahashi, N., Mitsufuji, Y., McAuley, J., & Berg-Kirkpatrick, T. (2023). Clipsep: Learning text-queried sound separation with noisy unlabeled videos. In Proceedings of International Conference on Learning Representations (ICLR)."},{"key":"2689_CR17","unstructured":"Du, Z., Wang, Y., Chen, Q., Shi, X., Lv, X., Zhao, T., Gao, Z., Yang, Y., Gao, C., Wang, H., & Yu, F.(2024). Cosyvoice 2: Scalable streaming speech synthesis with large language models. arXiv preprint arXiv:2412.10117."},{"key":"2689_CR18","doi-asserted-by":"crossref","unstructured":"Dumoulin, V., Perez, E., Schucher, N., Strub, F., Vries, H., & d., Courville, A., & Bengio, Y. (2018). Feature-wise transformations. Distill,3(7), Article e11.","DOI":"10.23915\/distill.00011"},{"key":"2689_CR19","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1016\/j.neunet.2017.12.012","volume":"107","author":"S Elfwing","year":"2018","unstructured":"Elfwing, S., Uchibe, E., & Doya, K. (2018). 
Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural Networks,107, 3\u201311.","journal-title":"Neural Networks"},{"key":"2689_CR20","doi-asserted-by":"crossref","unstructured":"Ephrat, A., Mosseri, I., Lang, O., Dekel, T., Wilson, K., Hassidim, A., Freeman, W.\u00a0T., & Rubinstein, M. (2018). Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation. arXiv preprint arXiv:1804.03619.","DOI":"10.1145\/3197517.3201357"},{"key":"2689_CR21","doi-asserted-by":"crossref","unstructured":"Gan, C., Huang, D., Zhao, H., Tenenbaum, J.\u00a0B., & Torralba, A. (2020). Music gesture for visual sound separation. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 10478\u201310487.","DOI":"10.1109\/CVPR42600.2020.01049"},{"key":"2689_CR22","doi-asserted-by":"crossref","unstructured":"Gao, R., Feris, R., & Grauman, K. (2018). Learning to separate object sounds by watching unlabeled video. In Proceedings of the European Conference on Computer Vision (ECCV), pages 35\u201353.","DOI":"10.1007\/978-3-030-01219-9_3"},{"key":"2689_CR23","doi-asserted-by":"crossref","unstructured":"Gao, R. & Grauman, K. (2019). Co-separating sounds of visual objects. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 3879\u20133888.","DOI":"10.1109\/ICCV.2019.00398"},{"key":"2689_CR24","unstructured":"Gao, R., Hoogeboom, E., Heek, J., Bortoli, V.\u00a0D., Murphy, K.\u00a0P., & Salimans, T. (2024). Diffusion meets flow matching: Two sides of the same coin."},{"key":"2689_CR25","first-page":"133345","volume":"37","author":"I Gat","year":"2024","unstructured":"Gat, I., Remez, T., Shaul, N., Kreuk, F., Chen, R. T., Synnaeve, G., Adi, Y., & Lipman, Y. (2024). Discrete flow matching. Advances in Neural Information Processing Systems,37, 133345\u2013133385.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR26","unstructured":"Gong, S., Li, M., Feng, J., Wu, Z., & Kong, L. (2022). Diffuseq: Sequence to sequence text generation with diffusion models. arXiv preprint arXiv:2210.08933."},{"key":"2689_CR27","doi-asserted-by":"crossref","unstructured":"Gu, S., Chen, D., Bao, J., Wen, F., Zhang, B., Chen, D., Yuan, L., & Guo, B. (2022). Vector quantized diffusion model for text-to-image synthesis. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 10696\u201310706.","DOI":"10.1109\/CVPR52688.2022.01043"},{"key":"2689_CR28","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770\u2013778.","DOI":"10.1109\/CVPR.2016.90"},{"key":"2689_CR29","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., & Abbeel, P. (2020). Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems,33, 6840\u20136851.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR30","unstructured":"Ho, J., Salimans, T., Gritsenko, A., Chan, W., Norouzi, M., & Fleet, D.\u00a0J. (2022). Video diffusion models. arXiv preprint arXiv:2204.03458."},{"key":"2689_CR31","doi-asserted-by":"crossref","unstructured":"Huang, C., Liang, S., Tian, Y., Kumar, A., & Xu, C. (2024a). High-quality visually-guided sound separation from diverse categories. 
In Proceedings of the Asian Conference on Computer Vision (ACCV), pages 35\u201349.","DOI":"10.1007\/978-981-96-0960-4_7"},{"key":"2689_CR32","doi-asserted-by":"crossref","unstructured":"Huang, C., Markovic, D., Xu, C., & Richard, A. (2024b). Modeling and driving human body soundfields through acoustic primitives. arXiv preprint arXiv:2407.13083.","DOI":"10.1007\/978-3-031-72684-2_1"},{"key":"2689_CR33","doi-asserted-by":"crossref","unstructured":"Huang, C., Tian, Y., Kumar, A., & Xu, C. (2023). Egocentric audio-visual object localization. arXiv preprint arXiv:2303.13471.","DOI":"10.1109\/CVPR52729.2023.02194"},{"key":"2689_CR34","doi-asserted-by":"crossref","unstructured":"Huang, R., Zhao, Z., Liu, H., Liu, J., Cui, C., & Ren, Y. (2022). Prodiff: Progressive fast diffusion model for high-quality text-to-speech. In Proceedings of the 30th ACM International Conference on Multimedia, pages 2595\u20132605.","DOI":"10.1145\/3503161.3547855"},{"key":"2689_CR35","unstructured":"Jin, Y., Sun, Z., Li, N., Xu, K., Jiang, H., Zhuang, N., Huang, Q., Song, Y., Mu, Y., & Lin, Z. (2024). Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954."},{"key":"2689_CR36","unstructured":"Kong, Z., Ping, W., Huang, J., Zhao, K., & Catanzaro, B. (2020). Diffwave: A versatile diffusion model for audio synthesis. arXiv preprint arXiv:2009.09761."},{"key":"2689_CR37","doi-asserted-by":"crossref","unstructured":"Lee, J. & Han, S. (2021). Nu-wave: A diffusion probabilistic model for neural audio upsampling. arXiv preprint arXiv:2104.02321.","DOI":"10.21437\/Interspeech.2021-36"},{"key":"2689_CR38","doi-asserted-by":"crossref","unstructured":"Lee, S., Jung, C., Jang, Y., Kim, J., & Chung, J.\u00a0S. (2023). Seeing through the conversation: Audio-visual speech separation based on diffusion model. arXiv preprint arXiv:2310.19581.","DOI":"10.1109\/ICASSP48485.2024.10447679"},{"key":"2689_CR39","first-page":"4328","volume":"35","author":"X Li","year":"2022","unstructured":"Li, X., Thickstun, J., Gulrajani, I., Liang, P. S., & Hashimoto, T. B. (2022). Diffusion-lm improves controllable text generation. Advances in Neural Information Processing Systems,35, 4328\u20134343.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR40","unstructured":"Lipman, Y., Chen, R.\u00a0T., Ben-Hamu, H., Nickel, M., & Le, M. (2022). Flow matching for generative modeling. In The Eleventh International Conference on Learning Representations."},{"key":"2689_CR41","unstructured":"Liu, A.\u00a0H., Le, M., Vyas, A., Shi, B., Tjandra, A., & Hsu, W.-N. (2023). Generative pre-training for speech with flow matching. arXiv preprint arXiv:2310.16338."},{"key":"2689_CR42","first-page":"38","volume-title":"Grounding dino: Marrying dino with grounded pre-training for open-set object detection","author":"S Liu","year":"2024","unstructured":"Liu, S., Zeng, Z., Ren, T., Li, F., Zhang, H., Yang, J., Jiang, Q., Li, C., Yang, J., Su, H., & Zhu, J. (2024). Grounding dino: Marrying dino with grounded pre-training for open-set object detection (pp. 38\u201355). Springer."},{"key":"2689_CR43","unstructured":"Liu, X., Gong, C., & Chengyue. (2022). Flow straight and fast: Learning to generate and transfer data with rectified flow. 
In The Eleventh International Conference on Learning Representations."},{"key":"2689_CR44","first-page":"11341","volume-title":"Matcha-tts: A fast tts architecture with conditional flow matching","author":"S Mehta","year":"2024","unstructured":"Mehta, S., Tu, R., Beskow, J., Sz\u00e9kely, \u00c9., & Henter, G. E. (2024). Matcha-tts: A fast tts architecture with conditional flow matching (pp. 11341\u201311345). IEEE."},{"key":"2689_CR45","unstructured":"Meng, C., He, Y., Song, Y., Song, J., Wu, J., Zhu, J.-Y., & Ermon, S. (2021). Sdedit: Guided image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073."},{"key":"2689_CR46","doi-asserted-by":"publisher","first-page":"1368","DOI":"10.1109\/TASLP.2021.3066303","volume":"29","author":"D Michelsanti","year":"2021","unstructured":"Michelsanti, D., Tan, Z.-H., Zhang, S.-X., Xu, Y., Yu, M., Yu, D., & Jensen, J. (2021). An overview of deep-learning-based audio-visual speech enhancement and separation. IEEE\/ACM Transactions on Audio, Speech, and Language Processing,29, 1368\u20131396.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2689_CR47","unstructured":"Mittal, H., Morgado, P., Jain, U., & Gupta, A. (2022). Learning state-aware visual representations from audible interactions. In Proceedings of the European conference on computer vision (ECCV)."},{"key":"2689_CR48","unstructured":"Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., & Chen, M. (2021). Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741."},{"key":"2689_CR49","unstructured":"Nichol, A.\u00a0Q. & Dhariwal, P. (2021). Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pages 8162\u20138171. PMLR."},{"key":"2689_CR50","doi-asserted-by":"crossref","unstructured":"Owens, A. & Efros, A.\u00a0A. (2018). Audio-visual scene analysis with self-supervised multisensory features. In Proceedings of the European Conference on Computer Vision (ECCV), pages 631\u2013648.","DOI":"10.1007\/978-3-030-01231-1_39"},{"key":"2689_CR51","unstructured":"Polyak, A., Zohar, A., Brown, A., Tjandra, A., Sinha, A., Lee, A., Vyas, A., Shi, B., Ma, C.-Y., Chuang, C.-Y., Yan, D. (2024). Movie gen: A cast of media foundation models. arXiv preprint arXiv:2410.13720."},{"key":"2689_CR52","unstructured":"Popov, V., Vovk, I., Gogoryan, V., Sadekova, T., & Kudinov, M. (2021).Grad-tts: A diffusion probabilistic model for text-to-speech. In International Conference on Machine Learning, pages 8599\u20138608. PMLR."},{"key":"2689_CR53","first-page":"292","volume-title":"Multiple sound sources localization from coarse to fine","author":"R Qian","year":"2020","unstructured":"Qian, R., Hu, D., Dinkel, H., Wu, M., Xu, N., & Lin, W. (2020). Multiple sound sources localization from coarse to fine (pp. 292\u2013308). Springer."},{"key":"2689_CR54","first-page":"8748","volume-title":"Learning transferable visual models from natural language supervision","author":"A Radford","year":"2021","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., & Krueger, G. (2021). Learning transferable visual models from natural language supervision (pp. 8748\u20138763). 
PMLR."},{"key":"2689_CR55","unstructured":"Raffel, C., McFee, B., Humphrey, E.\u00a0J., Salamon, J., Nieto, O., Liang, D., Ellis, D.\u00a0P., & Raffel, C.\u00a0C. (2014). Mir_eval: A transparent implementation of common mir metrics. In ISMIR, pages 367\u2013372."},{"key":"2689_CR56","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., and Chen, M. (2022). Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125."},{"key":"2689_CR57","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., & Ommer, B. (2022). High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pages 10684\u201310695.","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"2689_CR58","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., & Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pages 234\u2013241. Springer.","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"2689_CR59","doi-asserted-by":"crossref","unstructured":"Ruan, L., Ma, Y., Yang, H., He, H., Liu, B., Fu, J., Yuan, N. J., Jin, Q., & Guo, B. (2022). Mm-diffusion: Learning multi-modal diffusion models for joint audio and video generation. arXiv preprint arXiv:2212.09478.","DOI":"10.1109\/CVPR52729.2023.00985"},{"key":"2689_CR60","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., & Aberman, K. (2022). Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. arXiv preprint arXiv:2208.12242.","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"2689_CR61","first-page":"36479","volume":"35","author":"C Saharia","year":"2022","unstructured":"Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E. L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., & Ho, J. (2022). Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems,35, 36479\u201336494.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2689_CR62","doi-asserted-by":"crossref","unstructured":"Scheibler, R., Ji, Y., Chung, S.-W., Byun, J., Choe, S., & Choi, M.-S. (2023). Diffusion-based generative speech source separation. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1\u20135. IEEE.","DOI":"10.1109\/ICASSP49357.2023.10095310"},{"key":"2689_CR63","unstructured":"Shen, Z., Zhang, M., Zhao, H., Yi, S., & Li, H. (2021). Efficient attention: Attention with linear complexities. In Proceedings of the IEEE\/CVF winter conference on applications of computer vision, pages 3531\u20133539."},{"key":"2689_CR64","unstructured":"Singer, U., Polyak, A., Hayes, T., Yin, X., An, J., Zhang, S., Hu, Q., Yang, H., Ashual, O., Gafni, O., Parikh, D. (2022). Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792."},{"key":"2689_CR65","first-page":"177","volume-title":"Non-negative matrix factorization for polyphonic music transcription","author":"P Smaragdis","year":"2003","unstructured":"Smaragdis, P., & Brown, J. C. (2003). Non-negative matrix factorization for polyphonic music transcription (Vol. No. 03TH8684), pp. 177\u2013180). 
IEEE."},{"key":"2689_CR66","unstructured":"Song, J., Meng, C., & Ermon, S. (2020a). Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502."},{"key":"2689_CR67","unstructured":"Song, Y. & Ermon, S. (2019). Generative modeling by estimating gradients of the data distribution. Advances in neural information processing systems, 32."},{"key":"2689_CR68","unstructured":"Song, Y., Sohl-Dickstein, J., Kingma, D.\u00a0P., Kumar, A., Ermon, S., & Poole, B. (2020b). Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456."},{"key":"2689_CR69","unstructured":"Spiertz, M. & Gnann, V. (2009). Source-filter based clustering for monaural blind source separation. In Proceedings of the 12th International Conference on Digital Audio Effects, volume\u00a04, page\u00a06."},{"key":"2689_CR70","doi-asserted-by":"crossref","unstructured":"Tan, R., Ray, A., Burns, A., Plummer, B.\u00a0A., Salamon, J., Nieto, O., Russell, B., & Saenko, K. (2023). Language-guided audio-visual source separation via trimodal consistency. arXiv preprint arXiv:2303.16342.","DOI":"10.1109\/CVPR52729.2023.01019"},{"key":"2689_CR71","doi-asserted-by":"crossref","unstructured":"Tian, Y., Hu, D., & Xu, C. (2021). Cyclic co-learning of sounding object visual grounding and sound separation. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 2745\u20132754.","DOI":"10.1109\/CVPR46437.2021.00277"},{"key":"2689_CR72","doi-asserted-by":"crossref","unstructured":"Tian, Y., Shi, J., Li, B., Duan, Z., & Xu, C. (2018). Audio-visual event localization in unconstrained videos. In Proceedings of the European Conference on Computer Vision (ECCV), pages 247\u2013263.","DOI":"10.1007\/978-3-030-01216-8_16"},{"key":"2689_CR73","unstructured":"Tzinis, E., Wisdom, S., Jansen, A., Hershey, S., Remez, T., Ellis, D.\u00a0P., & Hershey, J.\u00a0R. (2020). Into the wild with audioscope: Unsupervised audio-visual separation of on-screen sounds. arXiv preprint arXiv:2011.01143."},{"key":"2689_CR74","doi-asserted-by":"crossref","unstructured":"Tzinis, E., Wisdom, S., Remez, T., & Hershey, J.\u00a0R. (2022). Audioscopev2: Audio-visual attention architectures for calibrated open-domain on-screen sound separation. In Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXXVII, pages 368\u2013385. Springer.","DOI":"10.1007\/978-3-031-19836-6_21"},{"key":"2689_CR75","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.\u00a0N., Kaiser, \u0141., & Polosukhin, I. (2017). Attention is all you need. Advances in neural information processing systems, 30."},{"issue":"3","key":"2689_CR76","doi-asserted-by":"publisher","first-page":"1066","DOI":"10.1109\/TASL.2006.885253","volume":"15","author":"T Virtanen","year":"2007","unstructured":"Virtanen, T. (2007). Monaural sound source separation by nonnegative matrix factorization with temporal continuity and sparseness criteria. IEEE transactions on audio, speech, and language processing,15(3), 1066\u20131074.","journal-title":"IEEE transactions on audio, speech, and language processing"},{"key":"2689_CR77","doi-asserted-by":"crossref","unstructured":"Wang, Z.-Q., Cornell, S., Choi, S., Lee, Y., Kim, B.-Y., & Watanabe, S. (2023). Tf-gridnet: Making time-frequency domain models great again for monaural speaker separation. 
In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1\u20135. IEEE.","DOI":"10.1109\/ICASSP49357.2023.10094992"},{"key":"2689_CR78","doi-asserted-by":"crossref","unstructured":"Wu, Y. & He, K. (2018). Group normalization. In Proceedings of the European conference on computer vision (ECCV), pages 3\u201319.","DOI":"10.1007\/978-3-030-01261-8_1"},{"key":"2689_CR79","doi-asserted-by":"crossref","unstructured":"Xu, X., Dai, B., & Lin, D. (2019). Recursive visual sound separation using minus-plus net. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 882\u2013891.","DOI":"10.1109\/ICCV.2019.00097"},{"key":"2689_CR80","doi-asserted-by":"crossref","unstructured":"Zhao, H., Gan, C., Ma, W.-C., & Torralba, A. (2019). The sound of motions. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 1735\u20131744.","DOI":"10.1109\/ICCV.2019.00182"},{"key":"2689_CR81","doi-asserted-by":"crossref","unstructured":"Zhao, H., Gan, C., Rouditchenko, A., Vondrick, C., McDermott, J., & Torralba, A. (2018). The sound of pixels. In Proceedings of the European conference on computer vision (ECCV), pages 570\u2013586.","DOI":"10.1007\/978-3-030-01246-5_35"},{"key":"2689_CR82","doi-asserted-by":"crossref","unstructured":"Zhu, L. & Rahtu, E. (2020).Visually guided sound source separation using cascaded opponent filter network. In Proceedings of the Asian Conference on Computer Vision.","DOI":"10.1007\/978-3-030-69544-6_25"},{"key":"2689_CR83","doi-asserted-by":"crossref","unstructured":"Zhu, L. & Rahtu, E. (2022). Visually guided sound source separation and localization using self-supervised motion representations. In Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pages 1289\u20131299.","DOI":"10.1109\/WACV51458.2022.00223"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02689-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02689-9","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02689-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T08:32:45Z","timestamp":1774600365000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02689-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,6]]},"references-count":83,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["2689"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02689-9","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2,6]]},"assertion":[{"value":"30 April 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 September 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 February 2026","order":3,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"104"}}