{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T04:48:38Z","timestamp":1746679718401,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":30,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819755998"},{"type":"electronic","value":"9789819756001"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-97-5600-1_30","type":"book-chapter","created":{"date-parts":[[2024,7,29]],"date-time":"2024-07-29T15:03:47Z","timestamp":1722265427000},"page":"345-356","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["GLAD: A Global-Attention-Based Diffusion Model for Infrared and Visible Image Fusion"],"prefix":"10.1007","author":[{"given":"Haozhe","family":"Guo","sequence":"first","affiliation":[]},{"given":"Mengjie","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Kaijiang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Su","sequence":"additional","affiliation":[]},{"given":"Pei","family":"Lv","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,7,30]]},"reference":[{"key":"30_CR1","doi-asserted-by":"publisher","first-page":"16040","DOI":"10.1109\/ACCESS.2017.2735865","volume":"5","author":"A 
Dogra","year":"2017","unstructured":"Dogra, A., Goyal, B., Agrawal, S.: From multi-scale decomposition to non-multiscale decomposition methods: a comprehensive survey of image fusion techniques and its applications. IEEE Access 5, 16040\u201316067 (2017)","journal-title":"IEEE Access"},{"issue":"12","key":"30_CR2","doi-asserted-by":"publisher","first-page":"2162","DOI":"10.3390\/electronics9122162","volume":"9","author":"C Sun","year":"2020","unstructured":"Sun, C., Zhang, C., Xiong, N.: Infrared and visible image fusion techniques based on deep learning: a review. Electronics 9(12), 2162 (2020)","journal-title":"Electronics"},{"issue":"2","key":"30_CR3","doi-asserted-by":"publisher","first-page":"599","DOI":"10.3390\/s23020599","volume":"23","author":"W Ma","year":"2023","unstructured":"Ma, W., et al.: Infrared and visible image fusion technology and application: a review. Sensors 23(2), 599 (2023)","journal-title":"Sensors"},{"key":"30_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101870","volume":"99","author":"L Tang","year":"2023","unstructured":"Tang, L., Zhang, H., Xu, H., Ma, J.: Rethinking the necessity of image fusion in high-level vision tasks: a practical infrared and visible image fusion network based on progressive semantic injection and scene fidelity. Inf. Fus. 99, 101870 (2023)","journal-title":"Inf. Fus."},{"key":"30_CR5","doi-asserted-by":"crossref","unstructured":"Zhang, X., Demiris, Y.: Visible and infrared image fusion using deep learning. IEEE Trans. Pattern Anal. Mach. Intell. (2023)","DOI":"10.1109\/TPAMI.2023.3261282"},{"key":"30_CR6","doi-asserted-by":"crossref","unstructured":"Liu, J., et al.: Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5802\u20135811 (2022)","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"Li, C., Zhu, C., Huang, Y., Tang, J., Wang, L.: Cross-modal ranking with soft consistency and noisy labels for robust RGB-T tracking. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 808\u2013823 (2018)","DOI":"10.1007\/978-3-030-01261-8_49"},{"key":"30_CR8","doi-asserted-by":"crossref","unstructured":"Lu, Y., et al.: Cross-modality person re-identification with shared-specific feature transfer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13379\u201313389 (2020)","DOI":"10.1109\/CVPR42600.2020.01339"},{"key":"30_CR9","doi-asserted-by":"publisher","first-page":"7790","DOI":"10.1109\/TIP.2021.3109518","volume":"30","author":"W Zhou","year":"2021","unstructured":"Zhou, W., Liu, J., Lei, J., Yu, L., Hwang, J.N.: GMNet: Graded-feature multilabel-learning network for rgb-thermal urban scene semantic segmentation. IEEE Trans. Image Process. 30, 7790\u20137802 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"30_CR10","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma, J., Ma, Y., Li, C.: Infrared and visible image fusion methods and applications: a survey. Inf. Fus. 45, 153\u2013178 (2019)","journal-title":"Inf. Fus."},{"key":"30_CR11","doi-asserted-by":"publisher","first-page":"147","DOI":"10.1016\/j.inffus.2014.09.004","volume":"24","author":"Y Liu","year":"2015","unstructured":"Liu, Y., Liu, S., Wang, Z.: A general framework for image fusion based on multiscale transform and sparse representation. Inf. Fus. 24, 147\u2013164 (2015)","journal-title":"Inf. 
Fus."},{"issue":"12","key":"30_CR12","doi-asserted-by":"publisher","first-page":"1882","DOI":"10.1109\/LSP.2016.2618776","volume":"23","author":"Y Liu","year":"2016","unstructured":"Liu, Y., Chen, X., Ward, R.K., Wang, Z.J.: Image fusion with convolutional sparse representation. IEEE Signal Process. Lett. 23(12), 1882\u20131886 (2016)","journal-title":"IEEE Signal Process. Lett."},{"key":"30_CR13","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.infrared.2016.01.009","volume":"76","author":"DP Bavirisetti","year":"2016","unstructured":"Bavirisetti, D.P., Dhuli, R.: Two-scale image fusion of visible and infrared images using saliency detection. Infrared Phys. Technol. 76, 52\u201364 (2016)","journal-title":"Infrared Phys. Technol."},{"key":"30_CR14","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1016\/j.infrared.2017.02.005","volume":"82","author":"J Ma","year":"2017","unstructured":"Ma, J., Zhou, Z., Wang, B., Zong, H.: Infrared and visible image fusion based on visual saliency map and weighted least square optimization. Infrared Phys. Technol. 82, 8\u201317 (2017)","journal-title":"Infrared Phys. Technol."},{"key":"30_CR15","doi-asserted-by":"crossref","unstructured":"Li, S., Kang, X., Fang, L., Hu, J., Yin, H.: Pixel-level image fusion: A survey of the state of the art. information Fusion 33, 100\u2013112 (2017)","DOI":"10.1016\/j.inffus.2016.05.004"},{"issue":"5","key":"30_CR16","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Wu, X.J.: DenseFuse: a fusion approach to infrared and visible images. IEEE Trans. Image Process. 28(5), 2614\u20132623 (2018)","journal-title":"IEEE Trans. 
Image Process."},{"key":"30_CR17","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1016\/j.inffus.2022.03.007","volume":"83","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Zhang, H., Jiang, X., Ma, J.: PIAFusion: a progressive infrared and visible image fusion network based on illumination aware. Inf. Fus. 83, 79\u201392 (2022)","journal-title":"Inf. Fus."},{"key":"30_CR18","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: FusionGAN: a generative adversarial network for infrared and visible image fusion. Inf. Fus. 48, 11\u201326 (2019)","journal-title":"Inf. Fus."},{"key":"30_CR19","first-page":"1","volume":"70","author":"J Ma","year":"2020","unstructured":"Ma, J., Zhang, H., Shao, Z., Liang, P., Xu, H.: GANMcC: a generative adversarial network with multiclassification constraints for infrared and visible image fusion. IEEE Trans. Instrum. Meas. 70, 1\u201314 (2020)","journal-title":"IEEE Trans. Instrum. Meas."},{"issue":"1","key":"30_CR20","doi-asserted-by":"publisher","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","volume":"44","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., Ling, H.: U2Fusion: a unified unsupervised image fusion network. IEEE Trans. Pattern Anal. Mach. Intell. 44(1), 502\u2013518 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"30_CR21","doi-asserted-by":"crossref","unstructured":"Yue, J., Fang, L., Xia, S., Deng, Y., Ma, J.: Dif-Fusion: towards high color fidelity in infrared and visible image fusion with diffusion models. IEEE Trans. Image Process. (2023)","DOI":"10.1109\/TIP.2023.3322046"},{"key":"30_CR22","doi-asserted-by":"crossref","unstructured":"Zhao, Z., et al.: DDFM: denoising diffusion model for multi-modality image fusion. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8082\u20138093 (2023)","DOI":"10.1109\/ICCV51070.2023.00742"},{"issue":"4","key":"30_CR23","first-page":"4713","volume":"45","author":"C Saharia","year":"2022","unstructured":"Saharia, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image superresolution via iterative refinement. IEEE Trans. Pattern Anal. Mach. Intell. 45(4), 4713\u20134726 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"30_CR24","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"30_CR25","unstructured":"Baranchuk, D., Rubachev, I., Voynov, A., Khrulkov, V., Babenko, A.: Label-efficient semantic segmentation with diffusion models. arXiv preprint arXiv:2112.03126 (2021)"},{"key":"30_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"421","DOI":"10.1007\/978-3-030-00928-1_48","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2018","author":"AG Roy","year":"2018","unstructured":"Roy, A.G., Navab, N., Wachinger, C.: Concurrent Spatial and Channel \u2018Squeeze & Excitation\u2019 in Fully Convolutional Networks. In: Frangi, A.F., Schnabel, J.A., Davatzikos, C., Alberola-L\u00f3pez, C., Fichtinger, G. (eds.) MICCAI 2018. LNCS, vol. 11070, pp. 421\u2013429. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00928-1_48"},{"issue":"3","key":"30_CR27","doi-asserted-by":"publisher","first-page":"934","DOI":"10.1109\/TITS.2018.2791533","volume":"19","author":"Y Choi","year":"2018","unstructured":"Choi, Y., et al.: Kaist multi-spectral day\/night data set for autonomous and assisted driving. IEEE Trans. Intell. Transp. Syst. 
19(3), 934\u2013948 (2018)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"issue":"7","key":"30_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1049\/el:20020212","volume":"38","author":"G Qu","year":"2002","unstructured":"Qu, G., Zhang, D., Yan, P.: Information measure for performance of image fusion. Electron. Lett. 38(7), 1 (2002)","journal-title":"Electron. Lett."},{"issue":"2","key":"30_CR29","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1016\/j.inffus.2011.08.002","volume":"14","author":"Y Han","year":"2013","unstructured":"Han, Y., Cai, Y., Cao, Y., Xu, X.: A new image fusion performance metric based on visual information fidelity. Inf. Fus. 14(2), 127\u2013135 (2013)","journal-title":"Inf. Fus."},{"issue":"4","key":"30_CR30","doi-asserted-by":"publisher","first-page":"308","DOI":"10.1049\/el:20000267","volume":"36","author":"CS Xydeas","year":"2000","unstructured":"Xydeas, C.S., Petrovic, V., et al.: Objective image fusion performance measure. Electron. Lett. 36(4), 308\u2013309 (2000)","journal-title":"Electron. 
Lett."}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-5600-1_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,27]],"date-time":"2024-09-27T06:06:20Z","timestamp":1727417180000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-5600-1_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9789819755998","9789819756001"],"references-count":30,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-5600-1_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"30 July 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tianjin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 August 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/2024\/index.htm","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}