{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T23:13:10Z","timestamp":1776121990112,"version":"3.50.1"},"reference-count":71,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T00:00:00Z","timestamp":1741219200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T00:00:00Z","timestamp":1741219200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s11263-025-02393-8","type":"journal-article","created":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T10:11:26Z","timestamp":1741255886000},"page":"4483-4503","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":31,"title":["Part-Whole Relational Fusion Towards Multi-Modal Scene Understanding"],"prefix":"10.1007","volume":"133","author":[{"given":"Yi","family":"Liu","sequence":"first","affiliation":[]},{"given":"Chengxin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Shoukun","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Jungong","family":"Han","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,6]]},"reference":[{"key":"2393_CR1","first-page":"10944","volume":"34","author":"Y Huang","year":"2021","unstructured":"Huang, Y., Du, C., Xue, Z., Chen, X., Zhao, H., & Huang, L. (2021). What makes multi-modal learning better than single (provably). 
Advances in Neural Information Processing Systems, 34, 10944\u201310956.","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"8","key":"2393_CR2","doi-asserted-by":"publisher","first-page":"2122","DOI":"10.1007\/s11263-023-01784-z","volume":"131","author":"Y Wang","year":"2023","unstructured":"Wang, Y., Mao, Q., Zhu, H., Deng, J., Zhang, Y., Ji, J., Li, H., & Zhang, Y. (2023). Multi-modal 3d object detection in autonomous driving: a survey. International Journal of Computer Vision, 131(8), 2122\u20132152.","journal-title":"International Journal of Computer Vision"},{"issue":"5","key":"2393_CR3","doi-asserted-by":"publisher","first-page":"1748","DOI":"10.1007\/s11263-023-01952-1","volume":"132","author":"J Liu","year":"2024","unstructured":"Liu, J., Lin, R., Wu, G., Liu, R., Luo, Z., & Fan, X. (2024). Coconet: Coupled contrastive learning network with multi-level feature ensemble for multi-modality image fusion. International Journal of Computer Vision, 132(5), 1748\u20131775.","journal-title":"International Journal of Computer Vision"},{"key":"2393_CR4","doi-asserted-by":"publisher","first-page":"2618","DOI":"10.1007\/s11263-024-01998-9","volume":"132","author":"M Planamente","year":"2024","unstructured":"Planamente, M., Plizzari, C., Peirone, S. A., Caputo, B., & Bottino, A. (2024). Relative norm alignment for tackling domain shift in deep multi-modal classification. International Journal of Computer Vision, 132, 2618\u20132638.","journal-title":"International Journal of Computer Vision"},{"key":"2393_CR5","doi-asserted-by":"publisher","first-page":"2845","DOI":"10.1007\/s11263-024-01999-8","volume":"132","author":"X-F Zhu","year":"2024","unstructured":"Zhu, X.-F., Xu, T., Liu, Z., Tang, Z., Wu, X.-J., & Kittler, J. (2024). Unimod1k: Towards a more universal large-scale dataset and benchmark for multi-modal learning. 
International Journal of Computer Vision, 132, 2845\u20132860.","journal-title":"International Journal of Computer Vision"},{"key":"2393_CR6","doi-asserted-by":"crossref","unstructured":"Cao, J., Leng, H., Lischinski, D., Cohen-Or, D., Tu, C., & Li, Y. (2021). Shapeconv: Shape-aware convolutional layer for indoor rgb-d semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7088\u20137097","DOI":"10.1109\/ICCV48922.2021.00700"},{"issue":"10","key":"2393_CR7","doi-asserted-by":"publisher","first-page":"6700","DOI":"10.1109\/TCSVT.2022.3168279","volume":"32","author":"Y Sun","year":"2022","unstructured":"Sun, Y., Cao, B., Zhu, P., & Hu, Q. (2022). Drone-based rgb-infrared cross-modality vehicle detection via uncertainty-aware learning. IEEE Transactions on Circuits and Systems for Video Technology, 32(10), 6700\u20136713.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2393_CR8","doi-asserted-by":"publisher","first-page":"2313","DOI":"10.1109\/TIP.2021.3049332","volume":"30","author":"L-Z Chen","year":"2021","unstructured":"Chen, L.-Z., Lin, Z., Wang, Z., Yang, Y.-L., & Cheng, M.-M. (2021). Spatial information guided convolution for real-time rgbd semantic segmentation. IEEE Transactions on Image Processing, 30, 2313\u20132324.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"4","key":"2393_CR9","doi-asserted-by":"publisher","first-page":"4802","DOI":"10.1364\/OE.416130","volume":"29","author":"K Xiang","year":"2021","unstructured":"Xiang, K., Yang, K., & Wang, K. (2021). Polarization-driven semantic segmentation via efficient attention-bridged fusion. Optics Express, 29(4), 4802\u20134820.","journal-title":"Optics Express"},{"key":"2393_CR10","doi-asserted-by":"publisher","first-page":"7790","DOI":"10.1109\/TIP.2021.3109518","volume":"30","author":"W Zhou","year":"2021","unstructured":"Zhou, W., Liu, J., Lei, J., Yu, L., & Hwang, J.-N. (2021). 
Gmnet: Graded-feature multilabel-learning network for rgb-thermal urban scene semantic segmentation. IEEE Transactions on Image Processing, 30, 7790\u20137802.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2393_CR11","doi-asserted-by":"crossref","unstructured":"Wang, Y., Chen, X., Cao, L., Huang, W., Sun, F., & Wang, Y. (2022). Multimodal token fusion for vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12186\u201312195","DOI":"10.1109\/CVPR52688.2022.01187"},{"key":"2393_CR12","doi-asserted-by":"crossref","unstructured":"Wan, B., Zhou, X., Sun, Y., Wang, T., Lv, C., Wang, S., Yin, H., & Yan, C. (2023). Mffnet: Multi-modal feature fusion network for vdt salient object detection. IEEE Transactions on Multimedia","DOI":"10.1109\/TMM.2023.3291823"},{"key":"2393_CR13","doi-asserted-by":"crossref","unstructured":"Broedermann, T., Sakaridis, C., Dai, D., & Van\u00a0Gool, L. (2023). Hrfuser: A multi-resolution sensor fusion architecture for 2d object detection. In: 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC), pp. 4159\u20134166. IEEE","DOI":"10.1109\/ITSC57777.2023.10422432"},{"key":"2393_CR14","doi-asserted-by":"crossref","unstructured":"Zhang, J., Liu, R., Shi, H., Yang, K., Rei\u00df, S., Peng, K., Fu, H., Wang, K., & Stiefelhagen, R. (2023). Delivering arbitrary-modal semantic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1136\u20131147","DOI":"10.1109\/CVPR52729.2023.00116"},{"key":"2393_CR15","unstructured":"Hinton, G.E., Sabour, S., & Frosst, N. (2018). Matrix capsules with em routing. In: International Conference on Learning Representations"},{"key":"2393_CR16","doi-asserted-by":"publisher","first-page":"6719","DOI":"10.1109\/TIP.2022.3215887","volume":"31","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Zhang, D., Liu, N., Xu, S., & Han, J. (2022). 
Disentangled capsule routing for fast part-object relational saliency. IEEE Transactions on Image Processing, 31, 6719\u20136732.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2393_CR17","doi-asserted-by":"crossref","unstructured":"Wang, H., Chen, Y., Ma, C., Avery, J., Hull, L., & Carneiro, G. (2023). Multi-modal learning with missing modality via shared-specific feature modelling. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15878\u201315887","DOI":"10.1109\/CVPR52729.2023.01524"},{"key":"2393_CR18","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., & Darrell, T. (2015). Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3431\u20133440","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"2393_CR19","doi-asserted-by":"crossref","unstructured":"Jin, Z., Gong, T., Yu, D., Chu, Q., Wang, J., Wang, C., & Shao, J. (2021). Mining contextual information beyond image for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7231\u20137241","DOI":"10.1109\/ICCV48922.2021.00714"},{"key":"2393_CR20","doi-asserted-by":"crossref","unstructured":"Borse, S., Wang, Y., Zhang, Y., & Porikli, F. (2021). Inverseform: A loss function for structured boundary-aware segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5901\u20135911","DOI":"10.1109\/CVPR46437.2021.00584"},{"key":"2393_CR21","doi-asserted-by":"crossref","unstructured":"Gu, J., Kwon, H., Wang, D., Ye, W., Li, M., Chen, Y.-H., Lai, L., Chandra, V., & Pan, D.Z. (2022). Multi-scale high-resolution vision transformer for semantic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
12094\u201312103","DOI":"10.1109\/CVPR52688.2022.01178"},{"key":"2393_CR22","doi-asserted-by":"crossref","unstructured":"Hazirbas, C., Ma, L., Domokos, C., & Cremers, D. (2017). Fusenet: Incorporating depth into semantic segmentation via fusion-based cnn architecture. In: Computer Vision\u2013ACCV 2016: 13th Asian Conference on Computer Vision, Taipei, Taiwan, November 20-24, 2016, Revised Selected Papers, Part I 13, pp. 213\u2013228. Springer","DOI":"10.1007\/978-3-319-54181-5_14"},{"key":"2393_CR23","doi-asserted-by":"crossref","unstructured":"Wang, J., Wang, Z., Tao, D., See, S., & Wang, G. (2016). Learning common and specific features for rgb-d semantic segmentation with deconvolutional networks. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14, pp. 664\u2013679. Springer","DOI":"10.1007\/978-3-319-46454-1_40"},{"key":"2393_CR24","first-page":"4835","volume":"33","author":"Y Wang","year":"2020","unstructured":"Wang, Y., Huang, W., Sun, F., Xu, T., Rong, Y., & Huang, J. (2020). Deep multimodal fusion by channel exchanging. Advances in Neural Information Processing Systems, 33, 4835\u20134845.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2393_CR25","doi-asserted-by":"publisher","first-page":"1158","DOI":"10.1109\/TMM.2023.3277281","volume":"26","author":"L Zhao","year":"2023","unstructured":"Zhao, L., Zhou, H., Zhu, X., Song, X., Li, H., & Tao, W. (2023). Lif-seg: Lidar and camera image fusion for 3d lidar semantic segmentation. IEEE Transactions on Multimedia, 26, 1158\u20131168.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2393_CR26","doi-asserted-by":"crossref","unstructured":"Liu, H., Lu, T., Xu, Y., Liu, J., Li, W., & Chen, L. (2022). Camliflow: bidirectional camera-lidar fusion for joint optical flow and scene flow estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5791\u20135801","DOI":"10.1109\/CVPR52688.2022.00570"},{"key":"2393_CR27","doi-asserted-by":"crossref","unstructured":"Liang, Y., Wakaki, R., Nobuhara, S., & Nishino, K. (2022). Multimodal material segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19800\u201319808","DOI":"10.1109\/CVPR52688.2022.01918"},{"key":"2393_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, J., Liu, H., Yang, K., Hu, X., Liu, R., & Stiefelhagen, R. (2023). Cmx: Cross-modal fusion for rgb-x semantic segmentation with transformers. IEEE Transactions on Intelligent Transportation Systems","DOI":"10.1109\/TITS.2023.3300537"},{"key":"2393_CR29","doi-asserted-by":"crossref","unstructured":"Tian, X., Zhang, J., Xiang, M., & Dai, Y. (2023). Modeling the distributional uncertainty for salient object detection models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19660\u201319670","DOI":"10.1109\/CVPR52729.2023.01883"},{"key":"2393_CR30","doi-asserted-by":"crossref","unstructured":"Li, G., Bai, Z., Liu, Z., Zhang, X., & Ling, H. (2023). Salient object detection in optical remote sensing images driven by transformer. IEEE Transactions on Image Processing","DOI":"10.1109\/TIP.2023.3314285"},{"key":"2393_CR31","doi-asserted-by":"crossref","unstructured":"Yuan, J., Zhu, A., Xu, Q., Wattanachote, K., & Gong, Y. (2023). Ctif-net: A cnn-transformer iterative fusion network for salient object detection. IEEE Transactions on Circuits and Systems for Video Technology","DOI":"10.1109\/TCSVT.2023.3321190"},{"issue":"7","key":"2393_CR32","first-page":"3688","volume":"44","author":"Y Liu","year":"2021","unstructured":"Liu, Y., Zhang, D., Zhang, Q., & Han, J. (2021). Part-object relational visual saliency. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(7), 3688\u20133704.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2393_CR33","doi-asserted-by":"crossref","unstructured":"Liu, Y., Zhou, L., Wu, G., Xu, S., & Han, J. (2023). Tcgnet: Type-correlation guidance for salient object detection. IEEE Transactions on Intelligent Transportation Systems","DOI":"10.1109\/TITS.2023.3342811"},{"key":"2393_CR34","doi-asserted-by":"publisher","first-page":"2160","DOI":"10.1109\/TIP.2023.3263111","volume":"32","author":"Z Wu","year":"2023","unstructured":"Wu, Z., Allibert, G., Meriaudeau, F., Ma, C., & Demonceaux, C. (2023). Hidanet: Rgb-d salient object detection via hierarchical depth awareness. IEEE Transactions on Image Processing, 32, 2160\u20132173.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"4","key":"2393_CR35","doi-asserted-by":"publisher","first-page":"855","DOI":"10.1007\/s11263-022-01734-1","volume":"131","author":"J Li","year":"2023","unstructured":"Li, J., Ji, W., Zhang, M., Piao, Y., Lu, H., & Cheng, L. (2023). Delving into calibrated depth for accurate rgb-d salient object detection. International Journal of Computer Vision, 131(4), 855\u2013876.","journal-title":"International Journal of Computer Vision"},{"key":"2393_CR36","doi-asserted-by":"crossref","unstructured":"Xie, Z., Shao, F., Chen, G., Chen, H., Jiang, Q., Meng, X., & Ho, Y.-S. (2023). Cross-modality double bidirectional interaction and fusion network for rgb-t salient object detection. IEEE Transactions on Circuits and Systems for Video Technology","DOI":"10.1109\/TCSVT.2023.3241196"},{"key":"2393_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Wang, J., & Han, Y. (2023). Saliency prototype for rgb-d and rgb-t salient object detection. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 
3696\u20133705","DOI":"10.1145\/3581783.3612466"},{"issue":"3","key":"2393_CR38","doi-asserted-by":"publisher","first-page":"1558","DOI":"10.1109\/TMECH.2022.3215909","volume":"28","author":"K Song","year":"2022","unstructured":"Song, K., Wang, J., Bao, Y., Huang, L., & Yan, Y. (2022). A novel visible-depth-thermal image dataset of salient object detection for robotic visual perception. IEEE\/ASME Transactions on Mechatronics, 28(3), 1558\u20131569.","journal-title":"IEEE\/ASME Transactions on Mechatronics"},{"key":"2393_CR39","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.110074","volume":"147","author":"B Wan","year":"2024","unstructured":"Wan, B., Zhou, X., Sun, Y., Zhu, Z., Wang, H., Yan, C., et al. (2024). Tmnet: Triple-modal interaction encoder and multi-scale fusion decoder network for vdt salient object detection. Pattern Recognition, 147, 110074.","journal-title":"Pattern Recognition"},{"key":"2393_CR40","doi-asserted-by":"crossref","unstructured":"Bao, L., Zhou, X., Lu, X., Sun, Y., Yin, H., Hu, Z., Zhang, J., & Yan, C. (2024). Quality-aware selective fusion network for vdt salient object detection. IEEE Transactions on Image Processing","DOI":"10.1109\/TIP.2024.3393365"},{"key":"2393_CR41","unstructured":"Sabour, S., Frosst, N., & Hinton, G.E. (2017). Dynamic routing between capsules. Advances in neural information processing systems 30"},{"key":"2393_CR42","doi-asserted-by":"crossref","unstructured":"Pan, C., & Velipasalar, S. (2021). Pt-capsnet: A novel prediction-tuning capsule network suitable for deeper architectures. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 11996\u201312005","DOI":"10.1109\/ICCV48922.2021.01178"},{"key":"2393_CR43","doi-asserted-by":"publisher","first-page":"108486","DOI":"10.1016\/j.patcog.2021.108486","volume":"124","author":"R Shi","year":"2022","unstructured":"Shi, R., Niu, L., & Zhou, R. (2022). Sparse capsnet with explicit regularizer. 
Pattern Recognition, 124, 108486.","journal-title":"Pattern Recognition"},{"key":"2393_CR44","doi-asserted-by":"crossref","unstructured":"Liu, Y., Cheng, D., Zhang, D., Xu, S., & Han, J. (2024). Capsule networks with residual pose routing. IEEE Transactions on Neural Networks and Learning Systems","DOI":"10.1109\/TNNLS.2023.3347722"},{"key":"2393_CR45","doi-asserted-by":"publisher","first-page":"107851","DOI":"10.1016\/j.patcog.2021.107851","volume":"120","author":"M Jampour","year":"2021","unstructured":"Jampour, M., Abbaasi, S., & Javidi, M. (2021). Capsnet regularization and its conjugation with resnet for signature identification. Pattern Recognition, 120, 107851.","journal-title":"Pattern Recognition"},{"key":"2393_CR46","doi-asserted-by":"publisher","first-page":"5154","DOI":"10.1109\/TIFS.2021.3124734","volume":"16","author":"Y Liu","year":"2021","unstructured":"Liu, Y., Zhang, D., Zhang, Q., & Han, J. (2021). Integrating part-object relationship and contrast for camouflaged object detection. IEEE Transactions on Information Forensics and Security, 16, 5154\u20135166.","journal-title":"IEEE Transactions on Information Forensics and Security"},{"key":"2393_CR47","doi-asserted-by":"publisher","first-page":"1815","DOI":"10.1109\/TASLP.2022.3178236","volume":"30","author":"J Wu","year":"2022","unstructured":"Wu, J., Mai, S., & Hu, H. (2022). Interpretable multimodal capsule fusion. IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 30, 1815\u20131826.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2393_CR48","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"2393_CR49","first-page":"12077","volume":"34","author":"E Xie","year":"2021","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J. M., & Luo, P. (2021). Segformer: Simple and efficient design for semantic segmentation with transformers. Advances in Neural Information Processing Systems, 34, 12077\u201312090.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2393_CR50","doi-asserted-by":"crossref","unstructured":"Shrivastava, A., Gupta, A., & Girshick, R. (2016). Training region-based object detectors with online hard example mining. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 761\u2013769","DOI":"10.1109\/CVPR.2016.89"},{"key":"2393_CR51","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., & Guo, B. (2021). Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"2393_CR52","doi-asserted-by":"publisher","first-page":"19","DOI":"10.1007\/s10479-005-5724-z","volume":"134","author":"P-T De Boer","year":"2005","unstructured":"De Boer, P.-T., Kroese, D. P., Mannor, S., & Rubinstein, R. Y. (2005). A tutorial on the cross-entropy method. Annals of Operations Research, 134, 19\u201367.","journal-title":"Annals of Operations Research"},{"key":"2393_CR53","doi-asserted-by":"publisher","first-page":"6855","DOI":"10.1109\/TIP.2021.3099405","volume":"30","author":"J Li","year":"2021","unstructured":"Li, J., Su, J., Xia, C., Ma, M., & Tian, Y. (2021). Salient object detection with purificatory mechanism and structural similarity loss. 
IEEE Transactions on Image Processing, 30, 6855\u20136868.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2393_CR54","doi-asserted-by":"crossref","unstructured":"Rahman, M.A., & Wang, Y. (2016). Optimizing intersection-over-union in deep neural networks for image segmentation. In: International Symposium on Visual Computing, pp. 234\u2013244. Springer","DOI":"10.1007\/978-3-319-50835-1_22"},{"issue":"7","key":"2393_CR55","doi-asserted-by":"publisher","first-page":"4486","DOI":"10.1109\/TCSVT.2021.3127149","volume":"32","author":"Z Liu","year":"2021","unstructured":"Liu, Z., Tan, Y., He, Q., & Xiao, Y. (2021). Swinnet: Swin transformer drives edge-aware rgb-d and rgb-t salient object detection. IEEE Transactions on Circuits and Systems for Video Technology, 32(7), 4486\u20134497.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2393_CR56","doi-asserted-by":"crossref","unstructured":"Fan, D.-P., Cheng, M.-M., Liu, Y., Li, T., & Borji, A. (2017). Structure-measure: A new way to evaluate foreground maps. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4548\u20134557","DOI":"10.1109\/ICCV.2017.487"},{"key":"2393_CR57","doi-asserted-by":"crossref","unstructured":"Achanta, R., Hemami, S., Estrada, F., & Susstrunk, S. (2009). Frequency-tuned salient region detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1597\u20131604","DOI":"10.1109\/CVPR.2009.5206596"},{"key":"2393_CR58","doi-asserted-by":"crossref","unstructured":"Fan, D.-P., Gong, C., Cao, Y., Ren, B., Cheng, M.-M., & Borji, A. (2018). Enhanced-alignment measure for binary foreground map evaluation. In: Proceedings of the International Joint Conference on Artificial Intelligence, pp. 698\u2013704","DOI":"10.24963\/ijcai.2018\/97"},{"key":"2393_CR59","doi-asserted-by":"crossref","unstructured":"Wu, Z., Su, L., & Huang, Q. (2019). 
Cascaded partial decoder for fast and accurate salient object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3907\u20133916","DOI":"10.1109\/CVPR.2019.00403"},{"key":"2393_CR60","doi-asserted-by":"crossref","unstructured":"Chen, S., Tan, X., Wang, B., & Hu, X. (2018). Reverse attention for salient object detection. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 234\u2013250","DOI":"10.1007\/978-3-030-01240-3_15"},{"key":"2393_CR61","doi-asserted-by":"crossref","unstructured":"Fan, D.-P., Zhai, Y., Borji, A., Yang, J., & Shao, L. (2020). Bbs-net: Rgb-d salient object detection with a bifurcated backbone strategy network. In: European Conference on Computer Vision, pp. 275\u2013292. Springer","DOI":"10.1007\/978-3-030-58610-2_17"},{"key":"2393_CR62","doi-asserted-by":"publisher","first-page":"7012","DOI":"10.1109\/TIP.2020.3028289","volume":"30","author":"Z Chen","year":"2020","unstructured":"Chen, Z., Cong, R., Xu, Q., & Huang, Q. (2020). Dpanet: Depth potentiality-aware gated attention network for rgb-d salient object detection. IEEE Transactions on Image Processing, 30, 7012\u20137024.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"3","key":"2393_CR63","doi-asserted-by":"publisher","first-page":"4309","DOI":"10.1109\/TNNLS.2022.3202241","volume":"35","author":"Q Chen","year":"2022","unstructured":"Chen, Q., Zhang, Z., Lu, Y., Fu, K., & Zhao, Q. (2022). 3-d convolutional neural networks for rgb-d salient object detection and beyond. IEEE Transactions on Neural Networks and Learning Systems, 35(3), 4309\u20134323.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2393_CR64","doi-asserted-by":"crossref","unstructured":"Tang, B., Liu, Z., Tan, Y., & He, Q. (2022). Hrtransnet: Hrformer-driven two-modality salient object detection. 
IEEE Transactions on Circuits and Systems for Video Technology, 33(2), 728\u2013742.","DOI":"10.1109\/TCSVT.2022.3202563"},{"issue":"5","key":"2393_CR65","doi-asserted-by":"publisher","first-page":"2949","DOI":"10.1109\/TCSVT.2021.3099120","volume":"32","author":"J Wang","year":"2021","unstructured":"Wang, J., Song, K., Bao, Y., Huang, L., & Yan, Y. (2021). Cgfnet: Cross-guided fusion network for rgb-t salient object detection. IEEE Transactions on Circuits and Systems for Video Technology, 32(5), 2949\u20132961.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2393_CR66","doi-asserted-by":"publisher","first-page":"3752","DOI":"10.1109\/TIP.2022.3176540","volume":"31","author":"Z Tu","year":"2022","unstructured":"Tu, Z., Li, Z., Li, C., & Tang, J. (2022). Weakly alignment-free rgbt salient object detection with deep correlation network. IEEE Transactions on Image Processing, 31, 3752\u20133764.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2393_CR67","doi-asserted-by":"publisher","first-page":"1329","DOI":"10.1109\/TIP.2023.3242775","volume":"32","author":"W Zhou","year":"2023","unstructured":"Zhou, W., Zhu, Y., Lei, J., Yang, R., & Yu, L. (2023). Lsnet: Lightweight spatial boosting network for detecting salient objects in rgb-thermal images. IEEE Transactions on Image Processing, 32, 1329\u20131340.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2393_CR68","doi-asserted-by":"crossref","unstructured":"Yang, D., Chen, Z., Wang, Y., Wang, S., Li, M., Liu, S., Zhao, X., Huang, S., Dong, Z., Zhai, P., et al. (2023). Context de-confounded emotion recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
19005\u201319015","DOI":"10.1109\/CVPR52729.2023.01822"},{"issue":"17","key":"2393_CR69","doi-asserted-by":"publisher","first-page":"26731","DOI":"10.1007\/s11042-022-14305-w","volume":"82","author":"M Rana","year":"2023","unstructured":"Rana, M., & Bhushan, M. (2023). Machine learning and deep learning approach for medical image analysis: diagnosis to detection. Multimedia Tools and Applications, 82(17), 26731\u201326769.","journal-title":"Multimedia Tools and Applications"},{"key":"2393_CR70","unstructured":"Alman, J., & Song, Z. (2024). Fast attention requires bounded entries. Advances in Neural Information Processing Systems 36"},{"key":"2393_CR71","doi-asserted-by":"crossref","unstructured":"Agarwal, A., & Arora, C. (2023). Attention attention everywhere: Monocular depth prediction with skip attention. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 5861\u20135870","DOI":"10.1109\/WACV56688.2023.00581"}],"container-title":["International Journal of Computer 
Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02393-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02393-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02393-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,7]],"date-time":"2025-06-07T06:03:36Z","timestamp":1749276216000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02393-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,6]]},"references-count":71,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["2393"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02393-8","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3,6]]},"assertion":[{"value":"31 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 February 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 March 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}