{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T20:48:11Z","timestamp":1757623691108,"version":"3.44.0"},"reference-count":52,"publisher":"Springer Science and Business Media LLC","issue":"9","license":[{"start":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T00:00:00Z","timestamp":1750118400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T00:00:00Z","timestamp":1750118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62202205"],"award-info":[{"award-number":["62202205"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Key Research and Development Program of China under Grant","award":["2023YFF1105102","2023YFF1105105"],"award-info":[{"award-number":["2023YFF1105102","2023YFF1105105"]}]},{"DOI":"10.13039\/501100012476","name":"Fundamental Research Funds for Central Universities of the Central South University","doi-asserted-by":"publisher","award":["JUSRP123030"],"award-info":[{"award-number":["JUSRP123030"]}],"id":[{"id":"10.13039\/501100012476","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s11263-025-02507-2","type":"journal-article","created":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T13:36:08Z","timestamp":1750167368000},"page":"6611-6635","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["OCCO: 
LVM-Guided Infrared and Visible Image Fusion Framework Based on Object-Aware and Contextual Contrastive Learning"],"prefix":"10.1007","volume":"133","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4550-7879","authenticated-orcid":false,"given":"Hui","family":"Li","sequence":"first","affiliation":[]},{"given":"Congcong","family":"Bian","sequence":"additional","affiliation":[]},{"given":"Zeyang","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiaoning","family":"Song","sequence":"additional","affiliation":[]},{"given":"Xi","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xiao-Jun","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,17]]},"reference":[{"key":"2507_CR1","doi-asserted-by":"crossref","unstructured":"Chen, L., Zhu, Y., Papandreou, G., Schroff, F., & Adam, H. (2018). Encoder-decoder with atrous separable convolution for semantic image segmentation. CoRR arXiv:abs\/1802.02611.","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"2507_CR2","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1016\/j.optcom.2014.12.032","volume":"341","author":"G Cui","year":"2015","unstructured":"Cui, G., Feng, H., Xu, Z., Li, Q., & Chen, Y. (2015). Detail preserved fusion of visible and infrared images using regional saliency extraction and multi-scale image decomposition. Optics Communications, 341, 199\u2013209.","journal-title":"Optics Communications"},{"issue":"12","key":"2507_CR3","doi-asserted-by":"publisher","first-page":"2959","DOI":"10.1109\/26.477498","volume":"43","author":"A Eskicioglu","year":"1995","unstructured":"Eskicioglu, A., & Fisher, P. (1995). Image quality measures and their performance. 
IEEE Transactions on Communications, 43(12), 2959\u20132965.","journal-title":"IEEE Transactions on Communications"},{"issue":"10","key":"2507_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s13042-024-02162-y","volume":"15","author":"S Huang","year":"2024","unstructured":"Huang, S., Wu, X., Yang, Y., Wan, W., & Wang, X. (2024). A dual-encoder network based on multi-layer feature fusion for infrared and visible image fusion. International Journal of Machine Learning and Cybernetics, 15(10), 1\u201310.","journal-title":"International Journal of Machine Learning and Cybernetics"},{"key":"2507_CR5","doi-asserted-by":"crossref","unstructured":"Hwang, S., Park, J., Kim, N., Choi, Y., & So Kweon, I. (2015). Multispectral pedestrian detection: Benchmark dataset and baseline. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR).","DOI":"10.1109\/CVPR.2015.7298706"},{"key":"2507_CR6","doi-asserted-by":"publisher","first-page":"401","DOI":"10.1016\/j.inffus.2023.02.014","volume":"95","author":"DK Jain","year":"2023","unstructured":"Jain, D. K., Zhao, X., Gonz\u00e1lez-Almagro, G., Gan, C., & Kotecha, K. (2023). Multimodal pedestrian detection using metaheuristics with deep convolutional neural network in crowded scenes. Information Fusion, 95, 401\u2013414.","journal-title":"Information Fusion"},{"key":"2507_CR7","unstructured":"Jocher, G. (2020). Ultralytics yolov5. https:\/\/github.com\/ultralytics\/yolov5."},{"key":"2507_CR8","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W. Y., & Doll\u00e1r, P. (2023). Segment anything. 
2023 IEEE\/CVF International Conference on Computer Vision pp 3992\u20134003.","DOI":"10.1109\/ICCV51070.2023.00371"},{"issue":"5","key":"2507_CR9","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2019","unstructured":"Li, H., & Wu, X. J. (2019). DenseFuse: A Fusion Approach to Infrared and Visible Images. IEEE Transactions on Image Processing, 28(5), 2614\u20132623.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2507_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.102147","volume":"103","author":"H Li","year":"2024","unstructured":"Li, H., & Wu, X. J. (2024). Crossfuse: A novel cross attention mechanism based infrared and visible image fusion approach. Information Fusion, 103, Article 102147.","journal-title":"Information Fusion"},{"issue":"12","key":"2507_CR11","doi-asserted-by":"publisher","first-page":"9645","DOI":"10.1109\/TIM.2020.3005230","volume":"69","author":"H Li","year":"2020","unstructured":"Li, H., Wu, X. J., & Durrani, T. (2020). Nestfuse: An infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models. IEEE Transactions on Instrumentation and Measurement, 69(12), 9645\u20139656.","journal-title":"IEEE Transactions on Instrumentation and Measurement"},{"key":"2507_CR12","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li, H., Wu, X. J., & Kittler, J. (2021). Rfn-nest: An end-to-end residual fusion network for infrared and visible images. Information Fusion, 73, 72\u201386.","journal-title":"Information Fusion"},{"key":"2507_CR13","doi-asserted-by":"publisher","first-page":"1625","DOI":"10.1007\/s11263-023-01948-x","volume":"132","author":"H Li","year":"2023","unstructured":"Li, H., Liu, J., Zhang, Y., & Liu, Y. (2023a). 
A deep learning framework for infrared and visible image fusion without strict registration. International Journal of Computer Vision, 132, 1625\u20131644.","journal-title":"International Journal of Computer Vision"},{"issue":"9","key":"2507_CR14","doi-asserted-by":"publisher","first-page":"11040","DOI":"10.1109\/TPAMI.2023.3268209","volume":"45","author":"H Li","year":"2023","unstructured":"Li, H., Xu, T., Wu, X. J., Lu, J., & Kittler, J. (2023b). Lrrnet: A novel representation learning guided fusion network for infrared and visible images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(9), 11040\u201311052.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"7","key":"2507_CR15","doi-asserted-by":"publisher","first-page":"2864","DOI":"10.1109\/TIP.2013.2244222","volume":"22","author":"S Li","year":"2013","unstructured":"Li, S., Kang, X., & Hu, J. (2013). Image fusion with guided filtering. IEEE Transactions on Image Processing, 22(7), 2864\u20132875.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2507_CR16","doi-asserted-by":"publisher","first-page":"94","DOI":"10.1016\/j.infrared.2017.04.018","volume":"83","author":"C Liu","year":"2017","unstructured":"Liu, C., Qi, Y., & Ding, W. (2017a). Infrared and visible image fusion method based on saliency detection in sparse domain. Infrared Physics & Technology, 83, 94\u2013102.","journal-title":"Infrared Physics & Technology"},{"key":"2507_CR17","doi-asserted-by":"crossref","unstructured":"Liu, J., Fan, X., Huang, Z., Wu, G., Liu, R., Zhong, W., & Luo, Z. (2022) Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. 
Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition pp 5802\u20135811.","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"2507_CR18","doi-asserted-by":"crossref","unstructured":"Liu, J., Lin, R., Wu, G., Liu, R., Luo, Z., & Fan, X. (2023a) Coconet: Coupled contrastive learning network with multi-level feature ensemble for multi-modality image fusion. International Journal of Computer Vision pp 1\u201328.","DOI":"10.1007\/s11263-023-01952-1"},{"key":"2507_CR19","doi-asserted-by":"crossref","unstructured":"Liu, J., Liu, Z., Wu, G., Ma, L., Liu, R., Zhong, W., ... Fan, X. (2023b) Multi-interactive feature learning and a full-time multi-modality benchmark for image fusion and segmentation. Proceedings of the IEEE\/CVF international conference on computer vision pp 8115\u20138124","DOI":"10.1109\/ICCV51070.2023.00745"},{"key":"2507_CR20","doi-asserted-by":"publisher","first-page":"237","DOI":"10.1016\/j.inffus.2023.02.027","volume":"95","author":"J Liu","year":"2023","unstructured":"Liu, J., Wu, G., Luan, J., Jiang, Z., Liu, R., & Fan, X. (2023c). Holoco: Holistic and local contrastive learning network for multi-exposure image fusion. Information Fusion, 95, 237\u2013249.","journal-title":"Information Fusion"},{"key":"2507_CR21","unstructured":"Liu, J., Wu, G., Liu, Z., Wang, D., Jiang, Z., Ma, L., ... Fan, X. (2024a). Infrared and visible image fusion: From data compatibility to task adaption. IEEE Transactions on Pattern Analysis and Machine Intelligence pp 1\u201320."},{"key":"2507_CR22","doi-asserted-by":"crossref","unstructured":"Liu, S., & Deng, W. (2015). Very deep convolutional neural network based image classification using small training sample size. 2015 3rd IAPR Asian Conference on Pattern Recognition pp 730\u2013734.","DOI":"10.1109\/ACPR.2015.7486599"},{"key":"2507_CR23","doi-asserted-by":"crossref","unstructured":"Liu, S., Zeng, Z., Ren, T., Li, F., Zhang, H., Yang, J., ... Zhang, L. 
(2023d) Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499.","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"2507_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102352","volume":"108","author":"X Liu","year":"2024","unstructured":"Liu, X., Huo, H., Li, J., Pang, S., & Zheng, B. (2024b). A semantic-driven coupled network for infrared and visible image fusion. Information Fusion, 108, Article 102352.","journal-title":"Information Fusion"},{"key":"2507_CR25","doi-asserted-by":"publisher","first-page":"147","DOI":"10.1016\/j.inffus.2014.09.004","volume":"24","author":"Y Liu","year":"2015","unstructured":"Liu, Y., Liu, S., & Wang, Z. (2015). A general framework for image fusion based on multi-scale transform and sparse representation. Information Fusion, 24, 147\u2013164.","journal-title":"Information Fusion"},{"key":"2507_CR26","doi-asserted-by":"publisher","first-page":"191","DOI":"10.1016\/j.inffus.2016.12.001","volume":"36","author":"Y Liu","year":"2017","unstructured":"Liu, Y., Chen, X., Peng, H., & Wang, Z. (2017b). Multi-focus image fusion with a deep convolutional neural network. Information Fusion, 36, 191\u2013207.","journal-title":"Information Fusion"},{"key":"2507_CR27","doi-asserted-by":"publisher","first-page":"71","DOI":"10.1016\/j.inffus.2020.06.013","volume":"64","author":"Y Liu","year":"2020","unstructured":"Liu, Y., Wang, L., Cheng, J., Li, C., & Chen, X. (2020). Multi-focus image fusion: A survey of the state of the art. Information Fusion, 64, 71\u201391.","journal-title":"Information Fusion"},{"key":"2507_CR28","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma, J., Ma, Y., & Li, C. (2019a). Infrared and visible image fusion methods and applications: A survey. 
Information Fusion, 45, 153\u2013178.","journal-title":"Information Fusion"},{"key":"2507_CR29","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., & Jiang, J. (2019b). FusionGAN: A generative adversarial network for infrared and visible image fusion. Information Fusion, 48, 11\u201326.","journal-title":"Information Fusion"},{"key":"2507_CR30","doi-asserted-by":"publisher","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","volume":"29","author":"J Ma","year":"2020","unstructured":"Ma, J., Xu, H., Jiang, J., Mei, X., & Zhang, X. P. (2020). Ddcgan: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion. IEEE Transactions on Image Processing, 29, 4980\u20134995.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"7","key":"2507_CR31","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/JAS.2022.105686","volume":"9","author":"J Ma","year":"2022","unstructured":"Ma, J., Tang, L., Fan, F., Huang, J., Mei, X., & Ma, Y. (2022). Swinfusion: Cross-domain long-range learning for general image fusion via swin transformer. IEEE\/CAA Journal of Automatica Sinica, 9(7), 1200\u20131217.","journal-title":"IEEE\/CAA Journal of Automatica Sinica"},{"key":"2507_CR32","doi-asserted-by":"crossref","unstructured":"Mechrez, R., Talmi, I., & Zelnik-Manor, L. (2018). The contextual loss for image transformation with non-aligned data. In: Proceedings of the European conference on computer vision, pp 768\u2013783.","DOI":"10.1007\/978-3-030-01264-9_47"},{"issue":"9","key":"2507_CR33","doi-asserted-by":"publisher","first-page":"1855","DOI":"10.1016\/j.patcog.2004.03.010","volume":"37","author":"G Pajares","year":"2004","unstructured":"Pajares, G., & Manuel de la Cruz, J. (2004). A wavelet-based image fusion tutorial. 
Pattern Recognition, 37(9), 1855\u20131872.","journal-title":"Pattern Recognition"},{"key":"2507_CR34","doi-asserted-by":"publisher","DOI":"10.1117\/1.2945910","volume":"2","author":"J Roberts","year":"2008","unstructured":"Roberts, J., van Aardt, J., & Ahmed, F. B. (2008). Assessment of image fusion procedures using entropy, image quality, and multispectral classification. Journal of Applied Remote Sensing, 2, Article 023522.","journal-title":"Journal of Applied Remote Sensing"},{"key":"2507_CR35","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., & Ma, J. (2022). Image fusion in the loop of high-level vision tasks: A semantic-aware real-time infrared and visible image fusion network. Information Fusion, 82, 28\u201342.","journal-title":"Information Fusion"},{"key":"2507_CR36","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101870","volume":"99","author":"L Tang","year":"2023","unstructured":"Tang, L., Zhang, H., Xu, H., & Ma, J. (2023). Rethinking the necessity of image fusion in high-level vision tasks: A practical infrared and visible image fusion network based on progressive semantic injection and scene fidelity. Information Fusion, 99, Article 101870.","journal-title":"Information Fusion"},{"key":"2507_CR37","unstructured":"Toet, A. (2014). TNO Image Fusion Dataset. https:\/\/figshare.com\/articles\/TN_Image_Fusion_Dataset\/1008029."},{"issue":"4","key":"2507_CR38","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A., Sheikh, H., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. 
IEEE Transactions on Image Processing, 13(4), 600\u2013612.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2507_CR39","doi-asserted-by":"publisher","first-page":"2529","DOI":"10.1007\/s11263-023-01806-w","volume":"131","author":"Z Wang","year":"2023","unstructured":"Wang, Z., Li, X., Zhao, L., Duan, H., Wang, S., Liu, H., & Zhang, X. (2023). When multi-focus image fusion networks meet traditional edge-preservation technology. International Journal of Computer Vision, 131, 2529\u20132552.","journal-title":"International Journal of Computer Vision"},{"key":"2507_CR40","doi-asserted-by":"crossref","unstructured":"Xie, H., Zhang, Y., Qiu, J., Zhai, X., Liu, X., Yang, Y., & Zhong, J. (2023). Semantics lead all: Towards unified image registration and fusion from a semantic perspective. Information Fusion, 98, Article 101835.","DOI":"10.1016\/j.inffus.2023.101835"},{"key":"2507_CR41","doi-asserted-by":"crossref","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., & Ling, H. (2020). U2fusion: A unified unsupervised image fusion network. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1), 502\u2013518.","DOI":"10.1109\/TPAMI.2020.3012548"},{"key":"2507_CR42","doi-asserted-by":"publisher","first-page":"2761","DOI":"10.1007\/s11263-021-01501-8","volume":"129","author":"H Zhang","year":"2021","unstructured":"Zhang, H., & Ma, J. (2021). Sdnet: A versatile squeeze-and-decomposition network for real-time image fusion. International Journal of Computer Vision, 129, 2761\u20132785.","journal-title":"International Journal of Computer Vision"},{"issue":"12","key":"2507_CR43","doi-asserted-by":"publisher","first-page":"14679","DOI":"10.1109\/TITS.2023.3300537","volume":"24","author":"J Zhang","year":"2023","unstructured":"Zhang, J., Liu, H., Yang, K., Hu, X., Liu, R., & Stiefelhagen, R. (2023a). Cmx: Cross-modal fusion for rgb-x semantic segmentation with transformers. 
IEEE Transactions on Intelligent Transportation Systems, 24(12), 14679\u201314694.","journal-title":"IEEE Transactions on Intelligent Transportation Systems"},{"issue":"9","key":"2507_CR44","doi-asserted-by":"publisher","first-page":"2714","DOI":"10.1007\/s11263-021-01495-3","volume":"129","author":"P Zhang","year":"2021","unstructured":"Zhang, P., Wang, D., Lu, H., & Yang, X. (2021). Learning adaptive attribute-driven representation for real-time rgb-t tracking. International Journal of Computer Vision, 129(9), 2714\u20132729.","journal-title":"International Journal of Computer Vision"},{"key":"2507_CR45","doi-asserted-by":"crossref","unstructured":"Zhang, T., Guo, H., Jiao, Q., Zhang, Q., & Han, J. (2023b). Efficient rgb-t tracking via cross-modality distillation. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 5404\u20135413.","DOI":"10.1109\/CVPR52729.2023.00523"},{"key":"2507_CR46","doi-asserted-by":"crossref","unstructured":"Zhang, X., Chen, Q., Ng, R., & Koltun, V. (2019). Zoom to learn, learn to zoom. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 3757\u20133765.","DOI":"10.1109\/CVPR.2019.00388"},{"key":"2507_CR47","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101863","volume":"99","author":"X Zhang","year":"2023","unstructured":"Zhang, X., Zhai, H., Liu, J., Wang, Z., & Sun, H. (2023c). Real-time infrared and visible image fusion network using adaptive pixel weighting strategy. Information Fusion, 99, Article 101863.","journal-title":"Information Fusion"},{"key":"2507_CR48","doi-asserted-by":"crossref","unstructured":"Zhao, W., Xie, S., Zhao, F., He, Y., & Lu, H. (2023a) Metafusion: Infrared and visible image fusion via meta-feature embedding from object detection. 
In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 13955\u201313965.","DOI":"10.1109\/CVPR52729.2023.01341"},{"key":"2507_CR49","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Bai, H., Zhang, J., Zhang, Y., Xu, S., Lin, Z., ... Van Gool, L. (2023b). Cddfuse: Correlation-driven dual-branch feature decomposition for multi-modality image fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 5906\u20135916.","DOI":"10.1109\/CVPR52729.2023.00572"},{"issue":"12","key":"2507_CR50","doi-asserted-by":"publisher","first-page":"5582","DOI":"10.1007\/s11263-024-02141-4","volume":"132","author":"Z Zheng","year":"2024","unstructured":"Zheng, Z., Zhong, Y., Ma, A., & Zhang, L. (2024). Single-temporal supervised learning for universal remote sensing change detection. International Journal of Computer Vision, 132(12), 5582\u20135602.","journal-title":"International Journal of Computer Vision"},{"key":"2507_CR51","doi-asserted-by":"crossref","unstructured":"Zhou, T., & Wang, W. (2024). Cross-image pixel contrasting for semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence pp 1\u201315.","DOI":"10.1109\/TPAMI.2024.3367952"},{"key":"2507_CR52","first-page":"1","volume":"71","author":"Z Zhu","year":"2022","unstructured":"Zhu, Z., Yang, X., Lu, R., Shen, T., Xie, X., & Zhang, T. (2022). Clf-net: Contrastive learning for infrared and visible image fusion network. 
IEEE Transactions on Instrumentation and Measurement, 71, 1\u201315.","journal-title":"IEEE Transactions on Instrumentation and Measurement"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02507-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02507-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02507-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T08:05:54Z","timestamp":1757405154000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02507-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,17]]},"references-count":52,"journal-issue":{"issue":"9","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["2507"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02507-2","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"type":"print","value":"0920-5691"},{"type":"electronic","value":"1573-1405"}],"subject":[],"published":{"date-parts":[[2025,6,17]]},"assertion":[{"value":"23 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 June 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 June 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 June 2025","order":4,"name":"change_date","label":"Change Date","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"Update","order":5,"name":"change_type","label":"Change Type","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The acknowledgment has been corrected","order":6,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}