{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T19:06:52Z","timestamp":1757617612584,"version":"3.44.0"},"reference-count":52,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,3,15]],"date-time":"2025-03-15T00:00:00Z","timestamp":1741996800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,3,15]],"date-time":"2025-03-15T00:00:00Z","timestamp":1741996800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62406202","62406202","62406202","62406202","62406202"],"award-info":[{"award-number":["62406202","62406202","62406202","62406202","62406202"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100005047","name":"Natural Science Foundation of Liaoning Province","doi-asserted-by":"publisher","award":["2024-BS-098","2024-BS-098","2024-BS-098","2024-BS-098","2024-BS-098","2021- KF-12-01"],"award-info":[{"award-number":["2024-BS-098","2024-BS-098","2024-BS-098","2024-BS-098","2024-BS-098","2021- KF-12-01"]}],"id":[{"id":"10.13039\/501100005047","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Young Teacher Training Fund of Shenyang University of Technology","award":["200005847","200005847","200005847","200005847","200005847"],"award-info":[{"award-number":["200005847","200005847","200005847","200005847","200005847"]}]},{"DOI":"10.13039\/501100014206","name":"Foundation of National Key 
Laboratory","doi-asserted-by":"crossref","award":["OEIP-O-202005"],"award-info":[{"award-number":["OEIP-O-202005"]}],"id":[{"id":"10.13039\/501100014206","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s00371-025-03855-3","type":"journal-article","created":{"date-parts":[[2025,3,15]],"date-time":"2025-03-15T09:39:46Z","timestamp":1742031586000},"page":"8055-8073","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Enhanced RGB-T saliency detection via thermal-guided multi-stage attention network"],"prefix":"10.1007","volume":"41","author":[{"given":"Yu","family":"Pang","sequence":"first","affiliation":[]},{"given":"Yang","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Chenyu","family":"Weng","sequence":"additional","affiliation":[]},{"given":"Jialin","family":"Lyu","sequence":"additional","affiliation":[]},{"given":"Chuanyue","family":"Bai","sequence":"additional","affiliation":[]},{"given":"Xiaosheng","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,15]]},"reference":[{"key":"3855_CR1","doi-asserted-by":"publisher","unstructured":"Ali, S.G., Wang, X., Li, P., Li, H., Yang, P., Jung, Y., Qin, J., Kim, J., Sheng, B.: EGDNet: an efficient glomerular detection network for multiple anomalous pathological feature in glomerulonephritis. Vis. Comput., https:\/\/doi.org\/10.1007\/s00371-024-03570-5 (2024)","DOI":"10.1007\/s00371-024-03570-5"},{"issue":"6","key":"3855_CR2","doi-asserted-by":"publisher","first-page":"2825","DOI":"10.1109\/TIP.2019.2891104","volume":"28","author":"H Chen","year":"2019","unstructured":"Chen, H., Li, Y.: Three-stream attention-aware network for RGB-D salient object detection. IEEE Trans. Image Process. 
28(6), 2825\u20132835 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"3855_CR3","doi-asserted-by":"publisher","first-page":"376","DOI":"10.1016\/j.patcog.2018.08.007","volume":"86","author":"H Chen","year":"2019","unstructured":"Chen, H., Li, Y., Su, D.: Multi-modal fusion network with multi-scale multi-path and cross-modal interactions for RGB-D salient object detection. Pattern Recognit. 86, 376\u2013385 (2019)","journal-title":"Pattern Recognit."},{"key":"3855_CR4","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Fu, H., Wei, X., Xiao, J., Cao, X.: Depth enhanced saliency detection method. In: Proc. Int Conf. Int. Multimedia. Comput Serv., p. 23 (2014)","DOI":"10.1145\/2632856.2632866"},{"key":"3855_CR5","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words:Transformers for image recognition at scale. In: Proc. Int Conf. Learn Represent., pp. 1\u201322 (2021)"},{"key":"3855_CR6","doi-asserted-by":"publisher","first-page":"6033","DOI":"10.1007\/s00371-023-03151-y","volume":"40","author":"Y Endo","year":"2024","unstructured":"Endo, Y.: Masked-attention diffusion guidance for spatially controlling text-to-image generation. Vis. Comput. 40, 6033\u20136045 (2024)","journal-title":"Vis. Comput."},{"key":"3855_CR7","doi-asserted-by":"crossref","unstructured":"Fu, K., Fan, D., Ji, G., Zhao, Q.: JL-DCF: Joint learning and densely-cooperative fusion framework for RGB-D salient object detection. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 
3049\u20133059 (2020)","DOI":"10.1109\/CVPR42600.2020.00312"},{"issue":"3","key":"3855_CR8","doi-asserted-by":"publisher","first-page":"1366","DOI":"10.1109\/TCSVT.2021.3069812","volume":"32","author":"L Huang","year":"2022","unstructured":"Huang, L., Song, K., Wang, J., Niu, M., Yan, Y.: Multi-graph fusion and learning for RGBT image saliency fusion. IEEE Trans. Circuits Syst. Video Technol. 32(3), 1366\u20131377 (2022)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"3855_CR9","doi-asserted-by":"publisher","first-page":"2512512","DOI":"10.1109\/TIM.2022.3185323","volume":"71","author":"F Huo","year":"2022","unstructured":"Huo, F., Zhu, X., Zhang, Q., Liu, Z., Yu, W.: Real-time one-stream semantic-guided refinement network for RGB-Thermal salient object detection. IEEE Trans. Instrum Measur. 71, 2512512 (2022)","journal-title":"IEEE Trans. Instrum Measur."},{"key":"3855_CR10","doi-asserted-by":"crossref","unstructured":"Ju, R., Ge, L., Geng, W., Ren, T., Wu, G.: Depth saliency based on anisotropic center-surround difference. In: Proc Int Conf. Image Process., pp. 1115\u20131119 (2014)","DOI":"10.1109\/ICIP.2014.7025222"},{"key":"3855_CR11","first-page":"131","volume":"52","author":"C Li","year":"2018","unstructured":"Li, C., Wang, G., Ma, Y., Zheng, A., Luo, B., Tang, J.: RGB-T saliency detection benchmark: dataset, baselines, analysis and a novel approach. Proc. Chin Conf. Image. Graph Technol. 52, 131\u2013142 (2018)","journal-title":"Proc. Chin Conf. Image. Graph Technol."},{"key":"3855_CR12","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3413598","author":"G Li","year":"2024","unstructured":"Li, G., Fang, C., Chen, Z., Mao, M., Lin, L.: Uncertainty-aware active domain adaptive salient object detection. IEEE Trans. Image Process. (2024). https:\/\/doi.org\/10.1109\/TIP.2024.3413598","journal-title":"IEEE Trans. 
Image Process."},{"key":"3855_CR13","doi-asserted-by":"crossref","unstructured":"Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 2117\u20132125 (2017)","DOI":"10.1109\/CVPR.2017.106"},{"key":"3855_CR14","doi-asserted-by":"crossref","unstructured":"Liu, J., Hou, Q., Cheng, M., Feng, J., Jiang, J.: A simple pooling-based design for real-time salient object detection, in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3912-3921 (2019)","DOI":"10.1109\/CVPR.2019.00404"},{"key":"3855_CR15","doi-asserted-by":"crossref","unstructured":"Liu, N., Zhang, N., Han, J.: Learning selective self-mutual attention for RGB-D saliency detection. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 13756\u201313765 (2020)","DOI":"10.1109\/CVPR42600.2020.01377"},{"issue":"4","key":"3855_CR16","doi-asserted-by":"publisher","first-page":"846","DOI":"10.1109\/TMM.2019.2934426","volume":"22","author":"D Liu","year":"2019","unstructured":"Liu, D., An, P., Ma, R., Zhan, W., Huang, X., Yahya, A.: Content-based light field image compression method with Gaussion process regression. IEEE Trans. Multimedia 22(4), 846\u2013859 (2019)","journal-title":"IEEE Trans. Multimedia"},{"issue":"4","key":"3855_CR17","doi-asserted-by":"publisher","first-page":"1083","DOI":"10.1109\/TMI.2022.3223683","volume":"42","author":"R Liu","year":"2022","unstructured":"Liu, R., Wang, T., Li, H., Zhang, P., Li, J., Yang, X., Shen, D., Sheng, B.: TMM-Nets: transferred multi-to mono-modal generation for lupus retinopathy diagnosis. IEEE Trans. Med. Imag. 42(4), 1083\u20131094 (2022)","journal-title":"IEEE Trans. Med. 
Imag."},{"issue":"11","key":"3855_CR18","doi-asserted-by":"publisher","first-page":"7300","DOI":"10.1109\/TPAMI.2024.3388153","volume":"46","author":"N Liu","year":"2024","unstructured":"Liu, N., Luo, Z., Zhang, N., Han, J.: VST++: Efficient and stronger visual salincy transformer. IEEE Trans. Pattern Anal. Mach. Intell. 46(11), 7300\u20137316 (2024)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3855_CR19","doi-asserted-by":"crossref","unstructured":"Ma, Y., Sun, D., Meng, Q., Ding, Z., Li, C.: Learning multiscale deep features and SVM regressors for adaptive RGB-T saliency detection. In: Proc. Int. Symp. Comput. Intell. Design., pp. 389\u2013392 (2017)","DOI":"10.1109\/ISCID.2017.92"},{"key":"3855_CR20","doi-asserted-by":"publisher","first-page":"1026","DOI":"10.1109\/TIP.2022.3232209","volume":"32","author":"M Ma","year":"2023","unstructured":"Ma, M., Xia, C., Xie, C., Chen, X., Li, J.: Boosting broader receptive fields for salient object detection. IEEE Trans. Image Process. 32, 1026\u20131038 (2023)","journal-title":"IEEE Trans. Image Process."},{"key":"3855_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109630","volume":"141","author":"F Meng","year":"2023","unstructured":"Meng, F., Gong, X., Zhang, Y.: SiamRank: a siamse based visual tracking network with ranking strategy. Pattern Recognit. 141, 109630 (2023)","journal-title":"Pattern Recognit."},{"key":"3855_CR22","doi-asserted-by":"publisher","first-page":"892","DOI":"10.1109\/TIP.2023.3234702","volume":"32","author":"Y Pang","year":"2023","unstructured":"Pang, Y., Zhao, X., Zhang, L., Lu, H.: CAVER: cross-modal view-mixed Transformer for bi-modal salient object detection. IEEE Trans. Image Porcess. 32, 892\u2013904 (2023)","journal-title":"IEEE Trans. 
Image Porcess."},{"key":"3855_CR23","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109138","volume":"135","author":"Y Pang","year":"2023","unstructured":"Pang, Y., Wu, H., Wu, C.: Cross-modal co-feedback cellular automata for RGB-T saliency detection. Pattern Recognit. 135, 109138 (2023)","journal-title":"Pattern Recognit."},{"key":"3855_CR24","doi-asserted-by":"publisher","first-page":"2189","DOI":"10.1109\/TMM.2022.3144070","volume":"25","author":"Y Pang","year":"2023","unstructured":"Pang, Y., Wu, C., Wu, H., Yu, X.: Unsupervised multi-subclass saliency classification for salient object detection. IEEE Trans. Multimedia 25, 2189\u20132202 (2023)","journal-title":"IEEE Trans. Multimedia"},{"key":"3855_CR25","doi-asserted-by":"publisher","first-page":"1959","DOI":"10.1007\/s00371-022-02458-6","volume":"39","author":"Y Pang","year":"2023","unstructured":"Pang, Y., Wu, C., Wu, H., Yu, X.: Over-sampling strategy-based class-imbalanced salient object detection and its application in underwater scene. Vis Comput. 39, 1959\u20131974 (2023)","journal-title":"Vis Comput."},{"issue":"1","key":"3855_CR26","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1007\/s11263-007-0110-8","volume":"81","author":"S Paris","year":"2009","unstructured":"Paris, S., Durand, F.: A fast approximation of the bilateral filter using a signal processing approach. In. J. Comput. Vis. 81(1), 24\u201352 (2009)","journal-title":"In. J. Comput. Vis."},{"key":"3855_CR27","doi-asserted-by":"crossref","unstructured":"Peng, H., Li, B., Xiong, W., Hu, W., Ji, R.: RGBD salient object detection: A benchmark and algorithms. In: Proc. Eur. Comput. Vis., pp. 92\u2013109 (2014)","DOI":"10.1007\/978-3-319-10578-9_7"},{"key":"3855_CR28","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: Proc. Int. Conf. Learn. Representat., pp. 
1\u201314 (2015)"},{"issue":"7","key":"3855_CR29","doi-asserted-by":"publisher","first-page":"3104","DOI":"10.1109\/TCSVT.2022.3233131","volume":"33","author":"K Song","year":"2023","unstructured":"Song, K., Huang, L., Gong, A., Yan, Y.: Multiple graph affinity interactive network and a variable illumination dataset for RGBT image salient object detection. IEEE Trans. Circuits Syst. Video Technol. 33(7), 3104\u20133118 (2023)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"12","key":"3855_CR30","doi-asserted-by":"publisher","first-page":"4421","DOI":"10.1109\/TCSVT.2019.2951621","volume":"30","author":"J Tang","year":"2020","unstructured":"Tang, J., Fan, D., Wang, X., Tu, Z., Li, C.: RGBT salient object detection: benchmark and a novel cooperative ranking approach. IEEE Trans. Circuits Syst. Video Technol. 30(12), 4421\u20134433 (2020)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"3855_CR31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3511621","author":"H Tang","year":"2024","unstructured":"Tang, H., Li, Z., Zhang, D., He, S., Tang, J.: Divide-and-conquer: modality-aware triple-decoder network for robust RGB-T salient object detection. IEEE Trans. Pattern Anal. Mach. Intell. (2024). https:\/\/doi.org\/10.1109\/TPAMI.2024.3511621","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3855_CR32","doi-asserted-by":"crossref","unstructured":"Tu, Z., Li, Z., Li, C., Lang, Y., Tang, J.: Multi-interactive Encoder-decoder Network for RGBT Salient Object Detection. (2020) arXiv preprint arXiv:2005.02315","DOI":"10.1109\/TIP.2021.3087412"},{"key":"3855_CR33","unstructured":"Tu, Z., Ma, Y., Li, Z., Li, C., Xu, J., Liu, Y.: RGBT salient object detection: A large-scale dataset and benchmark. 
(2020) arXiv preprint arXiv:2007.03262"},{"issue":"1","key":"3855_CR34","doi-asserted-by":"publisher","first-page":"160","DOI":"10.1109\/TMM.2019.2924578","volume":"22","author":"Z Tu","year":"2020","unstructured":"Tu, Z., Xia, T., Li, C., Wang, X., Ma, Y., Tang, J.: RGB-T image saliency detection via collaborative graph learning. IEEE Trans. Multimedia 22(1), 160\u2013173 (2020)","journal-title":"IEEE Trans. Multimedia"},{"key":"3855_CR35","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Proc. Neur Info. Process Sys., pp. 1\u201311 (2017)"},{"key":"3855_CR36","doi-asserted-by":"crossref","unstructured":"Wang, L., Lu, H., Xiang, R., Yang, M-H.: Deep networks for saliency detection via local estimation and global search. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3183\u20133192 (2015)","DOI":"10.1109\/CVPR.2015.7298938"},{"key":"3855_CR37","doi-asserted-by":"publisher","first-page":"55277","DOI":"10.1109\/ACCESS.2019.2913107","volume":"7","author":"N Wang","year":"2019","unstructured":"Wang, N., Gong, X.: Adaptive fusion for RGB-D salient objection. IEEE Access. 7, 55277\u201355284 (2019)","journal-title":"IEEE Access."},{"key":"3855_CR38","doi-asserted-by":"crossref","unstructured":"Wang, W., Shen, J., Dong, X., Yang, R.: Inferring salient objects from human fixations. IEEE Trans. Pattern Anal. Mach. Intell. 42(8), 1913\u20131927 (2020)","DOI":"10.1109\/TPAMI.2019.2905607"},{"issue":"6","key":"3855_CR39","doi-asserted-by":"publisher","first-page":"3239","DOI":"10.1109\/TPAMI.2021.3051099","volume":"44","author":"W Wang","year":"2022","unstructured":"Wang, W., Lai, Q., Fu, H., Shen, J., Ling, H., Yang, R.: Salient object detection in the deep learning era: an in-depth survey. IEEE Trans. Pattern Anal. Mach. Intell. 44(6), 3239\u20133259 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"3855_CR40","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2022.105640","volume":"118","author":"H Wang","year":"2023","unstructured":"Wang, H., Song, K., Huang, L., Wen, H., Yan, Y.: Thermal images-aware guided early fusion network for cross-illumination RGB-T salient object detection. Engineer Applicat. Art. Intell. 118, 105640 (2023)","journal-title":"Engineer Applicat. Art. Intell."},{"key":"3855_CR41","doi-asserted-by":"publisher","first-page":"1711","DOI":"10.1007\/s00371-023-02881-3","volume":"40","author":"Z Xiang","year":"2023","unstructured":"Xiang, Z., Zhu, C., Qian, M., Shen, Y., Shao, Y.: FashionSegNet: a model for high-precision semantic segmentation of clothing images. Vis Comput. 40, 1711\u20131727 (2023)","journal-title":"Vis Comput."},{"issue":"8","key":"3855_CR42","doi-asserted-by":"publisher","first-page":"4499","DOI":"10.1109\/TNNLS.2021.3116209","volume":"34","author":"Z Xie","year":"2021","unstructured":"Xie, Z., Zhang, W., Sheng, B., Li, P., Chen, C.L.P.: BaGFN: broad attentive graph fusion network for high-order feature interactions. IEEE Trans. Neur Netw. Learn. Syst. 34(8), 4499\u20134513 (2021)","journal-title":"IEEE Trans. Neur Netw. Learn. Syst."},{"key":"3855_CR43","doi-asserted-by":"publisher","first-page":"4337","DOI":"10.1007\/s00371-023-03085-5","volume":"40","author":"X Yu","year":"2024","unstructured":"Yu, X., Pang, Y., Chi, J., Qi, Q.: Cross-modal collaborative propagation for RGB-T saliency detection. The Vis. Comput. 40, 4337\u20134354 (2024)","journal-title":"The Vis. Comput."},{"key":"3855_CR44","doi-asserted-by":"publisher","first-page":"3321","DOI":"10.1109\/TIP.2019.2959253","volume":"29","author":"Q Zhang","year":"2020","unstructured":"Zhang, Q., Huang, N., Yao, L., Zhang, D., Shan, C., Han, J.: RGB-T salient object detection via fusing multi-level CNN features. IEEE Trans. Image Process. 29, 3321\u20133335 (2020)","journal-title":"IEEE Trans. 
Image Process."},{"key":"3855_CR45","doi-asserted-by":"crossref","unstructured":"Zhao, J., Cao, Y., Fan, D., Cheng, M., Li, X., Zhang, L.: Contrast prior and fluid pyramid integration for RGBD salient object detection. In: Proc. IEEE Conf. Comput. Vis. Pattern Recognit., pp. 3922\u20133931 (2019)","DOI":"10.1109\/CVPR.2019.00405"},{"key":"3855_CR46","doi-asserted-by":"crossref","unstructured":"Zhao, X., Pang, Y., Zhang, L., Lu, H., Zhang, L.: Suppress and balance: A simple gated network for salient object detection. In: Proc. Eur. Comput. Vis., pp. 35\u201351 (2020)","DOI":"10.1007\/978-3-030-58536-5_3"},{"key":"3855_CR47","doi-asserted-by":"crossref","unstructured":"Zhao, X., Zhang, L., Pang, Y., Lu, H., Zhang, L.: A single stream network for robust and real-time RGB-D salient object detection. In: Proc. Eur. Comput. Vis., pp. 646\u2013662 (2020)","DOI":"10.1007\/978-3-030-58542-6_39"},{"issue":"3","key":"3855_CR48","doi-asserted-by":"publisher","first-page":"1224","DOI":"10.1109\/TCSVT.2021.3077058","volume":"32","author":"W Zhou","year":"2022","unstructured":"Zhou, W., Guo, Q., Lei, J., Yu, L., Hwang, J.-N.: ECFFNet: Effective and consistent feature fusion network for RGB-T salient object detection. IEEE Trans. Circuits Syst. Video Technol. 32(3), 1224\u20131235 (2022)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"4","key":"3855_CR49","doi-asserted-by":"publisher","first-page":"957","DOI":"10.1109\/TETCI.2021.3118043","volume":"6","author":"W Zhou","year":"2022","unstructured":"Zhou, W., Zhu, Y., Lei, J., Wan, J., Yu, L.: APNet: adversarial-learning-assistance and perceived importance fusion network for all-day RGB-T salient object detection. IEEE Trans. Emerg Top. Comput Intell. 6(4), 957\u2013968 (2022)","journal-title":"IEEE Trans. Emerg Top. 
Comput Intell."},{"key":"3855_CR50","doi-asserted-by":"publisher","first-page":"2593","DOI":"10.1109\/TIP.2023.3270801","volume":"32","author":"H Zhou","year":"2023","unstructured":"Zhou, H., Tian, C., Zhang, Z., Li, C., Ding, Y., Xie, Y., Li, Z.: Position-aware relation learning for RGB-thermal salient object detection. IEEE Trans. Image Process. 32, 2593\u20132607 (2023)","journal-title":"IEEE Trans. Image Process."},{"issue":"12","key":"3855_CR51","doi-asserted-by":"publisher","first-page":"7696","DOI":"10.1109\/TCSVT.2023.3278410","volume":"33","author":"X Zhou","year":"2023","unstructured":"Zhou, X., Wu, S., Shi, R., Zheng, B., Wang, S., Yin, H., Zhang, J., Yan, C.: Transformer-based multi-scale feature integration network for video saliency prediction. IEEE Trans. Circuits Syst. Video Technol. 33(12), 7696\u20137707 (2023)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"3855_CR52","doi-asserted-by":"crossref","unstructured":"Zhu, C., Cai, X., Huang, K., Li, T., Li, G.: PDNet: Prior-model guided depth-enhanced network for salient object detection. In: Proc. IEEE Int. Conf. Multimedia Expo., pp. 
199\u2013204 (2019)","DOI":"10.1109\/ICME.2019.00042"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03855-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-03855-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03855-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T07:49:06Z","timestamp":1757144946000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-03855-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,15]]},"references-count":52,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["3855"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-03855-3","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2025,3,15]]},"assertion":[{"value":"20 February 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 March 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}