{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T07:24:00Z","timestamp":1774596240132,"version":"3.50.1"},"reference-count":44,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62371350"],"award-info":[{"award-number":["62371350"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62471338"],"award-info":[{"award-number":["62471338"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276192"],"award-info":[{"award-number":["62276192"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Fusion"],"published-print":{"date-parts":[[2026,8]]},"DOI":"10.1016\/j.inffus.2026.104212","type":"journal-article","created":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T23:36:48Z","timestamp":1770507408000},"page":"104212","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["CMVF: Cross-modal unregistered video fusion via spatio-temporal 
consistency"],"prefix":"10.1016","volume":"132","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0772-7929","authenticated-orcid":false,"given":"Jianfeng","family":"Ding","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5467-3428","authenticated-orcid":false,"given":"Hao","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9796-488X","authenticated-orcid":false,"given":"Zhongyuan","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5403-1895","authenticated-orcid":false,"given":"Jinsheng","family":"Xiao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1993-2708","authenticated-orcid":false,"given":"Xin","family":"Tian","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1862-4781","authenticated-orcid":false,"given":"Zhen","family":"Han","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3264-3265","authenticated-orcid":false,"given":"Jiayi","family":"Ma","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.inffus.2026.104212_bib0001","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"7099","article-title":"Task-customized mixture of adapters for general image fusion","author":"Zhu","year":"2024"},{"key":"10.1016\/j.inffus.2026.104212_bib0002","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"26384","article-title":"Probing synergistic high-Order interaction in infrared and visible image fusion","author":"Zheng","year":"2024"},{"key":"10.1016\/j.inffus.2026.104212_bib0003","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"27026","article-title":"Text-if: leveraging semantic text guidance for degradation-aware and interactive image fusion","author":"Yi","year":"2024"},{"key":"10.1016\/j.inffus.2026.104212_bib0004","doi-asserted-by":"crossref","first-page":"5806","DOI":"10.1109\/TPAMI.2024.3367905","article-title":"Rethinking the effectiveness of objective evaluation metrics in multi-focus image fusion: a statistic-based approach","volume":"46","author":"Liu","year":"2024","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104212_bib0005","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102450","article-title":"Diff-IF: multi-modality image fusion via diffusion model with fusion knowledge prior","volume":"110","author":"Yi","year":"2024","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0006","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2023.102147","article-title":"Crossfuse: a novel cross attention mechanism based infrared and visible image fusion approach","volume":"103","author":"Li","year":"2024","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0007","doi-asserted-by":"crossref","first-page":"40","DOI":"10.1016\/j.inffus.2020.08.022","article-title":"MFF-GAN: an unsupervised generative adversarial network with adaptive and gradient joint constraints for multi-focus image fusion","volume":"66","author":"Zhang","year":"2021","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0008","doi-asserted-by":"crossref","first-page":"185","DOI":"10.1016\/j.inffus.2022.09.019","article-title":"Current advances and future perspectives of image fusion: a comprehensive review","volume":"90","author":"Karim","year":"2023","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0009","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2023.102148","article-title":"Reciprocal transformer for hyperspectral and multispectral image fusion","volume":"104","author":"Ma","year":"2024","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0010","doi-asserted-by":"crossref","first-page":"5281","DOI":"10.1109\/TPAMI.2024.3368112","article-title":"A general spatial-frequency learning framework for multimodal image fusion","volume":"47","author":"Zhou","year":"2024","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104212_bib0011","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"4714","article-title":"Deepfuse: a deep unsupervised approach for exposure fusion with extreme exposure image pairs","author":"Ram Prabhakar","year":"2017"},{"issue":"5","key":"10.1016\/j.inffus.2026.104212_bib0012","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","article-title":"Densefuse: a fusion approach to infrared and visible images","volume":"28","author":"Li","year":"2018","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.inffus.2026.104212_bib0013","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","article-title":"FusionGAN: a generative adversarial network for infrared and visible image fusion","volume":"48","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"issue":"5","key":"10.1016\/j.inffus.2026.104212_bib0014","doi-asserted-by":"crossref","first-page":"1625","DOI":"10.1007\/s11263-023-01948-x","article-title":"A deep learning framework for infrared and visible image fusion without strict registration","volume":"132","author":"Li","year":"2024","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.inffus.2026.104212_bib0015","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"2126","article-title":"Transmef: a transformer-based multi-exposure image fusion framework using self-supervised multi-task learning","volume":"36","author":"Qu","year":"2022"},{"issue":"7","key":"10.1016\/j.inffus.2026.104212_bib0016","doi-asserted-by":"crossref","first-page":"1200","DOI":"10.1109\/JAS.2022.105686","article-title":"SwinFusion: cross-domain long-range learning for general image fusion via swin transformer","volume":"9","author":"Ma","year":"2022","journal-title":"IEEE\/CAA J. Autom. 
Sin."},{"key":"10.1016\/j.inffus.2026.104212_bib0017","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"8082","article-title":"DDFM: denoising diffusion model for multi-modality image fusion","author":"Zhao","year":"2023"},{"key":"10.1016\/j.inffus.2026.104212_bib0018","series-title":"The Thirty-ninth Annual Conference on Neural Information Processing Systems","first-page":"1","article-title":"Revisiting generative infrared and visible image fusion based on human cognitive laws","author":"Guo","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0019","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"7457","article-title":"Task-driven image fusion with learnable fusion loss","author":"Bai","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0020","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"2226","article-title":"DCEvo: discriminative cross-dimensional evolutionary learning for infrared and visible image fusion","author":"Liu","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0021","series-title":"2023 International Conference on Decision Aid Sciences and Applications (DASA)","first-page":"12","article-title":"Vowels\u2019 articulatory location classification based on formant frequency","author":"Abdulaziz","year":"2023"},{"key":"10.1016\/j.inffus.2026.104212_bib0022","doi-asserted-by":"crossref","first-page":"3673","DOI":"10.1109\/TPAMI.2025.3535617","article-title":"MulFS-CAP: multimodal fusion-supervised cross-modality alignment perception for unregistered infrared-visible image fusion","volume":"47","author":"Li","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104212_bib0023","doi-asserted-by":"crossref","first-page":"11031","DOI":"10.1109\/TMM.2024.3443673","article-title":"RCVS: a unified registration and fusion framework for video streams","volume":"26","author":"Xie","year":"2024","journal-title":"IEEE Trans. 
Multimed."},{"key":"10.1016\/j.inffus.2026.104212_bib0024","series-title":"The Thirty-ninth Annual Conference on Neural Information Processing Systems","first-page":"1","article-title":"A unified solution to video fusion: from multi-frame learning to benchmarking","author":"Zhao","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0025","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"14326","article-title":"Temcoco: temporally consistent multi-modal video fusion with visual-semantic collaboration","author":"Gong","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0026","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"4967","article-title":"Learning temporal consistency for low light video enhancement from single images","author":"Zhang","year":"2021"},{"key":"10.1016\/j.inffus.2026.104212_bib0027","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"1881","article-title":"Self-supervised learning via conditional motion propagation","author":"Zhan","year":"2019"},{"key":"10.1016\/j.inffus.2026.104212_bib0028","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"3496","article-title":"LLVIP: a visible-infrared paired dataset for low-light vision","author":"Jia","year":"2021"},{"key":"10.1016\/j.inffus.2026.104212_bib0029","unstructured":"L. Tang, Y. Wang, M. Gong, Z. Li, Y. Deng, X. Yi, C. Li, H. Xu, H. Zhang, J. Ma, VideoFusion: a spatio-temporal collaborative network for multi-modal video fusion and restoration, arXiv preprint arXiv: 2503.23359(2025)."},{"key":"10.1016\/j.inffus.2026.104212_bib0030","series-title":"2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"5108","article-title":"MFNet: towards real-time semantic segmentation for autonomous vehicles with multi-spectral scenes","author":"Ha","year":"2017"},{"issue":"1","key":"10.1016\/j.inffus.2026.104212_bib0031","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","article-title":"U2Fusion: a unified unsupervised image fusion network","volume":"44","author":"Xu","year":"2020","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104212_bib0032","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","article-title":"Infrared and visible image fusion methods and applications: a survey","volume":"45","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"issue":"12","key":"10.1016\/j.inffus.2026.104212_bib0033","doi-asserted-by":"crossref","first-page":"2959","DOI":"10.1109\/26.477498","article-title":"Image quality measures and their performance","volume":"43","author":"Eskicioglu","year":"1995","journal-title":"IEEE Trans. Commun."},{"issue":"7","key":"10.1016\/j.inffus.2026.104212_bib0034","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1049\/el:20020212","article-title":"Information measure for performance of image fusion","volume":"38","author":"Qu","year":"2002","journal-title":"Electron. Lett."},{"issue":"2","key":"10.1016\/j.inffus.2026.104212_bib0035","doi-asserted-by":"crossref","first-page":"127","DOI":"10.1016\/j.inffus.2011.08.002","article-title":"A new image fusion performance metric based on visual information fidelity","volume":"14","author":"Han","year":"2013","journal-title":"Inf. 
Fusion"},{"issue":"12","key":"10.1016\/j.inffus.2026.104212_bib0036","doi-asserted-by":"crossref","first-page":"1890","DOI":"10.1016\/j.aeue.2015.09.004","article-title":"A new image quality metric for image fusion: the sum of the correlations of differences","volume":"69","author":"Aslantas","year":"2015","journal-title":"AEU - Int. J. Electron. Commun."},{"key":"10.1016\/j.inffus.2026.104212_bib0037","series-title":"2014 IEEE 8th International Conference on Application of Information and Communication Technologies (AICT)","first-page":"1","article-title":"Fast-FMI: non-reference image fusion metric","author":"Haghighat","year":"2014"},{"issue":"9","key":"10.1016\/j.inffus.2026.104212_bib0038","doi-asserted-by":"crossref","first-page":"11040","DOI":"10.1109\/TPAMI.2023.3268209","article-title":"Lrrnet: a novel representation learning guided fusion network for infrared and visible images","volume":"45","author":"Li","year":"2023","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104212_bib0039","doi-asserted-by":"crossref","first-page":"80","DOI":"10.1016\/j.inffus.2022.11.010","article-title":"MUFusion: a general unsupervised image fusion network based on memory unit","volume":"92","author":"Cheng","year":"2023","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0040","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102655","article-title":"DDBFusion: an unified image decomposition and fusion framework based on dual decomposition and B\u00e9zier curves","volume":"114","author":"Zhang","year":"2025","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104212_bib0041","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"28102","article-title":"One model for all: low-level task interaction is a key to task-agnostic image fusion","author":"Cheng","year":"2025"},{"key":"10.1016\/j.inffus.2026.104212_bib0042","series-title":"International Conference on Data Intelligence and Cognitive Informatics","first-page":"529","article-title":"A review on yolov8 and its advancements","author":"Sohan","year":"2024"},{"key":"10.1016\/j.inffus.2026.104212_bib0043","doi-asserted-by":"crossref","first-page":"2632","DOI":"10.1109\/TIFS.2025.3533907","article-title":"Towards efficient and certified recovery from poisoning attacks in federated learning","volume":"20","author":"Jiang","year":"2025","journal-title":"IEEE Trans. Inf. 
Forensics Secur."},{"key":"10.1016\/j.inffus.2026.104212_bib0044","series-title":"2024 IEEE International Conference on Big Data (BigData)","first-page":"7822","article-title":"Efficient federated unlearning with adaptive differential privacy preservation","author":"Jiang","year":"2024"}],"container-title":["Information Fusion"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526000916?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1566253526000916?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T06:05:58Z","timestamp":1774591558000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1566253526000916"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,8]]},"references-count":44,"alternative-id":["S1566253526000916"],"URL":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104212","relation":{},"ISSN":["1566-2535"],"issn-type":[{"value":"1566-2535","type":"print"}],"subject":[],"published":{"date-parts":[[2026,8]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"CMVF: Cross-modal unregistered video fusion via spatio-temporal consistency","name":"articletitle","label":"Article Title"},{"value":"Information Fusion","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104212","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"104212"}}
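
The record above follows the Crossref REST API "work" message schema. As a minimal sketch (not part of the record itself), the snippet below shows how such a record can be fetched from the public `https://api.crossref.org/works/{doi}` endpoint and a few bibliographic fields extracted; it assumes network access, and note that `title`, `container-title`, and `author` are arrays in this schema.

```python
# Minimal sketch: fetch this Crossref work record and pull out a few fields.
# Assumes network access to the public Crossref REST API; field names mirror
# the JSON record above.
import json
import urllib.request

DOI = "10.1016/j.inffus.2026.104212"

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]  # the "message" object holds the work metadata

title = work["title"][0]                      # titles are arrays of strings
journal = work["container-title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
year = work["issued"]["date-parts"][0][0]     # [[2026, 8]] -> 2026

print(f"{title}. {journal} {work['volume']} ({year}), art. {work['article-number']}.")
print(f"Authors: {authors}")
print(f"Deposited references: {work['reference-count']}")
```

Crossref asks high-volume clients to identify themselves (for example via a `mailto` query parameter or a descriptive User-Agent), which this sketch omits for brevity.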