{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T17:22:30Z","timestamp":1770830550281,"version":"3.50.1"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031451690","type":"print"},{"value":"9783031451706","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-45170-6_21","type":"book-chapter","created":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T13:03:02Z","timestamp":1699966982000},"page":"199-208","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["A Contrastive Learning Approach for\u00a0Infrared-Visible Image Fusion"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-1427-3722","authenticated-orcid":false,"given":"Ashish Kumar","family":"Gupta","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-3327-7798","authenticated-orcid":false,"given":"Meghna","family":"Barnwal","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6532-942X","authenticated-orcid":false,"given":"Deepak","family":"Mishra","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,4]]},"reference":[{"key":"21_CR1","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma, J., Ma, Y., Li, C.: Infrared and visible image fusion methods and applications: a survey. Inf. Fusion 45, 153\u2013178 (2019)","journal-title":"Inf. Fusion"},{"key":"21_CR2","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.infrared.2016.01.009","volume":"76","author":"DP Bavirisetti","year":"2016","unstructured":"Bavirisetti, D.P., Dhuli, R.: Two-scale image fusion of visible and infrared images using saliency detection. Infrared Phys. Technol. 76, 52\u201364 (2016)","journal-title":"Infrared Phys. Technol."},{"key":"21_CR3","doi-asserted-by":"crossref","unstructured":"Wang, X., Yin, J., Zhang, K., Li, S., Yan, J.: Infrared weak-small targets fusion based on latent low-rank representation and DWT. IEEE Access 7, 112 681\u2013112 692 (2019)","DOI":"10.1109\/ACCESS.2019.2934523"},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"Yang, Y., et al.: Infrared and visible image fusion based on infrared background suppression. Opt. Lasers Eng. 164, 107528 (2023)","DOI":"10.1016\/j.optlaseng.2023.107528"},{"issue":"5","key":"21_CR5","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Wu, X.-J.: DenseFuse: a fusion approach to infrared and visible images. IEEE Trans. Image Process. 28(5), 2614\u20132623 (2018)","journal-title":"IEEE Trans. 
Image Process."},{"key":"21_CR6","first-page":"1","volume":"71","author":"Z Zhu","year":"2022","unstructured":"Zhu, Z., Yang, X., Lu, R., Shen, T., Xie, X., Zhang, T.: CLF-Net: contrastive learning for infrared and visible image fusion network. IEEE Trans. Instrum. Meas. 71, 1\u201315 (2022)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"21_CR7","doi-asserted-by":"crossref","unstructured":"Ram Prabhakar, K., Sai Srikar, V., Venkatesh Babu, R.: DeepFuse: a deep unsupervised approach for exposure fusion with extreme exposure image pairs. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4714\u20134722 (2017)","DOI":"10.1109\/ICCV.2017.505"},{"key":"21_CR8","doi-asserted-by":"publisher","first-page":"640","DOI":"10.1109\/TCI.2020.2965304","volume":"6","author":"R Hou","year":"2020","unstructured":"Hou, R., et al.: VIF-Net: an unsupervised framework for infrared and visible image fusion. IEEE Trans. Comput. Imaging 6, 640\u2013651 (2020)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"21_CR9","doi-asserted-by":"publisher","first-page":"824","DOI":"10.1109\/TCI.2021.3100986","volume":"7","author":"H Xu","year":"2021","unstructured":"Xu, H., Zhang, H., Ma, J.: Classification saliency-based rule for visible and infrared image fusion. IEEE Trans. Comput. Imaging 7, 824\u2013836 (2021)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"21_CR10","unstructured":"Kang, M., Park, J.: ContraGAN: contrastive learning for conditional image generation. In: Advances in Neural Information Processing Systems, vol. 33, pp. 21 357\u201321 369 (2020)"},{"key":"21_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"319","DOI":"10.1007\/978-3-030-58545-7_19","volume-title":"Computer Vision \u2013 ECCV 2020","author":"T Park","year":"2020","unstructured":"Park, T., Efros, A.A., Zhang, R., Zhu, J.-Y.: Contrastive learning for unpaired image-to-image translation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020, Part IX. LNCS, vol. 12354, pp. 319\u2013345. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58545-7_19"},{"key":"21_CR12","doi-asserted-by":"publisher","unstructured":"Huang, D.-S., Jo, K.-H., Figueroa-Garc\u00eda, J.C.: Intelligent Computing Theories and Application: 13th International Conference, ICIC 2017, Liverpool, UK, August 7\u201310, 2017, Proceedings, Part II, vol. 10362. Springer, Cham (2017). https:\/\/doi.org\/10.1007\/978-3-319-63309-1","DOI":"10.1007\/978-3-319-63309-1"},{"key":"21_CR13","unstructured":"Spiegl, B.: Contrastive unpaired translation using focal loss for patch classification. arXiv preprint arXiv:2109.12431 (2021)"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Andonian, A., Park, T., Russell, B., Isola, P., Zhu, J.-Y., Zhang, R.: Contrastive feature loss for image prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1934\u20131943 (2021)","DOI":"10.1109\/ICCVW54120.2021.00220"},{"issue":"4","key":"21_CR15","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A., Sheikh, H., Simoncelli, E.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. 
Image Process."},{"issue":"1","key":"21_CR16","doi-asserted-by":"publisher","first-page":"47","DOI":"10.1109\/TCI.2016.2644865","volume":"3","author":"H Zhao","year":"2016","unstructured":"Zhao, H., Gallo, O., Frosio, I., Kautz, J.: Loss functions for image restoration with neural networks. IEEE Trans. Comput. Imaging 3(1), 47\u201357 (2016)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"21_CR17","unstructured":"Toet, A., et al.: TNO image fusion dataset. Figshare. Data (2014)"},{"key":"21_CR18","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: FusionGAN: a generative adversarial network for infrared and visible image fusion. Inf. Fusion 48, 11\u201326 (2019)","journal-title":"Inf. Fusion"},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Li, H., Wu, X.-J., Kittler, J.: Infrared and visible image fusion using a deep learning framework. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 2705\u20132710. IEEE (2018)","DOI":"10.1109\/ICPR.2018.8546006"},{"issue":"11","key":"21_CR20","doi-asserted-by":"publisher","first-page":"1961","DOI":"10.1364\/JOSAA.34.001961","volume":"34","author":"H Guo","year":"2017","unstructured":"Guo, H., Ma, Y., Mei, X., Ma, J.: Infrared and visible image fusion based on total variation and augmented Lagrangian. JOSA A 34(11), 1961\u20131968 (2017)","journal-title":"JOSA A"},{"issue":"12","key":"21_CR21","doi-asserted-by":"publisher","first-page":"1882","DOI":"10.1109\/LSP.2016.2618776","volume":"23","author":"Y Liu","year":"2016","unstructured":"Liu, Y., Chen, X., Ward, R.K., Wang, Z.J.: Image fusion with convolutional sparse representation. IEEE Signal Process. Lett. 23(12), 1882\u20131886 (2016)","journal-title":"IEEE Signal Process. Lett."},{"key":"21_CR22","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Xu, S., Zhang, C., Liu, J., Li, P., Zhang, J.: DIDFuse: deep image decomposition for infrared and visible image fusion. arXiv preprint arXiv:2003.09210 (2020)","DOI":"10.24963\/ijcai.2020\/135"},{"issue":"1","key":"21_CR23","doi-asserted-by":"publisher","first-page":"203","DOI":"10.1109\/JSEN.2015.2478655","volume":"16","author":"DP Bavirisetti","year":"2015","unstructured":"Bavirisetti, D.P., Dhuli, R.: Fusion of infrared and visible sensor images based on anisotropic diffusion and Karhunen-Loeve transform. IEEE Sens. J. 16(1), 203\u2013209 (2015)","journal-title":"IEEE Sens. 
J."}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Machine Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-45170-6_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T13:05:35Z","timestamp":1699967135000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-45170-6_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031451690","9783031451706"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-45170-6_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"4 December 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PReMI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition and Machine Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 December 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 December 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"premi2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.isical.ac.in\/~premi23\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EquinOCS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"311","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"91","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"29% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
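The record above is a Crossref REST API "work" message for the chapter's DOI. As a minimal sketch of how such a record can be retrieved and unpacked, assuming only the public api.crossref.org endpoint and the Python standard library: the contact address passed via the mailto parameter (Crossref's polite-pool etiquette) is a placeholder, and the field names are taken directly from the record above.

```python
import json
import urllib.request

# Crossref REST API endpoint for a single work; the DOI is the one in the
# record above. The mailto parameter opts into Crossref's "polite" pool
# (an etiquette convention; the address below is a placeholder).
DOI = "10.1007/978-3-031-45170-6_21"
URL = f"https://api.crossref.org/works/{DOI}?mailto=you@example.org"

with urllib.request.urlopen(URL) as resp:
    work = json.load(resp)["message"]  # payload mirrors the record above

# Access fields defensively: Crossref metadata is sparse, and not every
# work carries every key.
print(work["title"][0])  # chapter title
print(", ".join(f"{a.get('given', '')} {a.get('family', '')}".strip()
                for a in work.get("author", [])))  # author list
print(f"{work.get('references-count', 0)} references, "
      f"cited {work.get('is-referenced-by-count', 0)} times")

# Conference details live in the "assertion" array as name/value pairs.
conf = {a["name"]: a["value"] for a in work.get("assertion", [])}
print(conf.get("conference_acronym"), conf.get("conference_year"),
      conf.get("acceptance_rate_of_full_papers"))
```

For this record the last line would report PReMI 2023 with the stated 29% full-paper acceptance rate (91 of 311 submissions, rounded to a whole number).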