{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T08:05:34Z","timestamp":1761897934064,"version":"3.40.3"},"publisher-location":"Cham","reference-count":25,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031533013"},{"type":"electronic","value":"9783031533020"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-53302-0_1","type":"book-chapter","created":{"date-parts":[[2024,1,28]],"date-time":"2024-01-28T09:02:09Z","timestamp":1706432529000},"page":"3-16","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Removing Stray-Light for\u00a0Wild-Field Fundus Image Fusion Based on\u00a0Large Generative 
Models"],"prefix":"10.1007","author":[{"given":"Jun","family":"Wu","sequence":"first","affiliation":[]},{"given":"Mingxin","family":"He","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jingjie","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Zeyu","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Dayong","family":"Ding","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,29]]},"reference":[{"issue":"2","key":"1_CR1","doi-asserted-by":"publisher","first-page":"328","DOI":"10.1016\/j.optlastec.2011.07.009","volume":"44","author":"X Bai","year":"2012","unstructured":"Bai, X., Zhou, F., Xue, B.: Image enhancement using multi scale image features extracted by top-hat transform. Opt. Laser Technol. 44(2), 328\u2013336 (2012)","journal-title":"Opt. Laser Technol."},{"issue":"11","key":"1_CR2","doi-asserted-by":"publisher","first-page":"5187","DOI":"10.1109\/TIP.2016.2598681","volume":"25","author":"B Cai","year":"2016","unstructured":"Cai, B., Xu, X., Jia, K., Qing, C., Tao, D.: DehazeNet: an end-to-end system for single image haze removal. IEEE Trans. Image Process. 25(11), 5187\u20135198 (2016)","journal-title":"IEEE Trans. Image Process."},{"key":"1_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2019.107445","volume":"170","author":"L Cao","year":"2020","unstructured":"Cao, L., Li, H., Zhang, Y.: Retinal image enhancement using low-pass filtering and $$\\alpha $$-rooting. Signal Process. 
170, 107445 (2020)","journal-title":"Signal Process."},{"key":"1_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"87","DOI":"10.1007\/978-3-030-87237-3_9","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"P Cheng","year":"2021","unstructured":"Cheng, P., Lin, L., Huang, Y., Lyu, J., Tang, X.: I-SECRET: importance-guided fundus image enhancement via semi-supervised contrastive constraining. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12908, pp. 87\u201396. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87237-3_9"},{"key":"1_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"48","DOI":"10.1007\/978-3-030-32239-7_6","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"H Fu","year":"2019","unstructured":"Fu, H., et al.: Evaluation of retinal image quality assessment networks in different color-spaces. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11764, pp. 48\u201356. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32239-7_6"},{"key":"1_CR6","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems 27 (2014)"},{"issue":"12","key":"1_CR7","first-page":"2341","volume":"33","author":"K He","year":"2010","unstructured":"He, K., Sun, J., Tang, X.: Single image haze removal using dark channel prior. IEEE Trans. Pattern Anal. Mach. Intell. 33(12), 2341\u20132353 (2010)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1_CR8","doi-asserted-by":"crossref","unstructured":"Hore, A., Ziou, D.: Image quality metrics: PSNR vs. SSIM. In: 20th International Conference on Pattern Recognition, pp. 2366\u20132369. 
IEEE (2010)","DOI":"10.1109\/ICPR.2010.579"},{"key":"1_CR9","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"issue":"3","key":"1_CR10","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0282416","volume":"18","author":"KG Lee","year":"2023","unstructured":"Lee, K.G., Song, S.J., Lee, S., Yu, H.G., Kim, D.I., Lee, K.M.: A deep learning-based framework for retinal fundus image enhancement. PLoS ONE 18(3), e0282416 (2023)","journal-title":"PLoS ONE"},{"issue":"7","key":"1_CR11","doi-asserted-by":"publisher","first-page":"1699","DOI":"10.1109\/TMI.2022.3147854","volume":"41","author":"H Li","year":"2022","unstructured":"Li, H., et al.: An annotation-free restoration network for cataractous fundus images. IEEE Trans. Med. Imaging 41(7), 1699\u20131710 (2022)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"3","key":"1_CR12","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1109\/LSP.2012.2227726","volume":"20","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Soundararajan, R., Bovik, A.C.: Making a \u201ccompletely blind\u201d image quality analyzer. IEEE Signal Process. Lett. 20(3), 209\u2013212 (2012)","journal-title":"IEEE Signal Process. Lett."},{"issue":"4","key":"1_CR13","doi-asserted-by":"publisher","first-page":"401","DOI":"10.1109\/42.41493","volume":"8","author":"E Peli","year":"1989","unstructured":"Peli, E., Peli, T.: Restoration of retinal images obtained through cataracts. IEEE Trans. Med. Imaging 8(4), 401\u2013406 (1989)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"1_CR14","doi-asserted-by":"crossref","unstructured":"Qian, R., Tan, R.T., Yang, W., Su, J., Liu, J.: Attentive generative adversarial network for raindrop removal from a single image. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2482\u20132491 (2018)","DOI":"10.1109\/CVPR.2018.00263"},{"key":"1_CR15","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1023\/B:VLSI.0000028532.53893.82","volume":"38","author":"AM Reza","year":"2004","unstructured":"Reza, A.M.: Realization of the contrast limited adaptive histogram equalization (CLAHE) for real-time image enhancement. J. VLSI Signal Process. Syst. Signal Image Video Technol. 38, 35\u201344 (2004)","journal-title":"J. VLSI Signal Process. Syst. Signal Image Video Technol."},{"issue":"3","key":"1_CR16","doi-asserted-by":"publisher","first-page":"996","DOI":"10.1109\/TMI.2020.3043495","volume":"40","author":"Z Shen","year":"2020","unstructured":"Shen, Z., Fu, H., Shen, J., Shao, L.: Modeling and enhancing low-quality retinal fundus images. IEEE Trans. Med. Imaging 40(3), 996\u20131006 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"1_CR17","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"issue":"4","key":"1_CR18","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"1_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1007\/978-3-031-16525-2_18","volume-title":"Ophthalmic Medical Image Analysis","author":"J Wu","year":"2022","unstructured":"Wu, J., et al.: Template mask based image fusion built-in algorithm for wide field fundus cameras. In: Antony, B., Fu, H., Lee, C.S., MacGillivray, T., Xu, Y., Zheng, Y. (eds.) OMIA 2022. LNCS, vol. 13576, pp. 
173\u2013182. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-16525-2_18"},{"key":"1_CR20","doi-asserted-by":"publisher","first-page":"137","DOI":"10.1016\/j.cmpb.2017.02.026","volume":"143","author":"L Xiong","year":"2017","unstructured":"Xiong, L., Li, H., Xu, L.: An enhancement method for color retinal images based on image formation model. Comput. Methods Programs Biomed. 143, 137\u2013150 (2017)","journal-title":"Comput. Methods Programs Biomed."},{"key":"1_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.108968","volume":"133","author":"B Yang","year":"2023","unstructured":"Yang, B., Zhao, H., Cao, L., Liu, H., Wang, N., Li, H.: Retinal image enhancement with artifact reduction and structure retention. Pattern Recogn. 133, 108968 (2023)","journal-title":"Pattern Recogn."},{"key":"1_CR22","doi-asserted-by":"crossref","unstructured":"Yang, Y., Wang, C., Liu, R., Zhang, L., Guo, X., Tao, D.: Self-augmented unpaired image dehazing via density and depth decomposition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2037\u20132046 (2022)","DOI":"10.1109\/CVPR52688.2022.00208"},{"issue":"1","key":"1_CR23","doi-asserted-by":"publisher","first-page":"122","DOI":"10.1166\/jmihi.2018.2244","volume":"8","author":"L Yao","year":"2018","unstructured":"Yao, L., Lin, Y., Muhammad, S.: An improved multi-scale image enhancement method based on Retinex theory. J. Med. Imaging Health Inform. 8(1), 122\u2013126 (2018)","journal-title":"J. Med. Imaging Health Inform."},{"key":"1_CR24","doi-asserted-by":"crossref","unstructured":"You, Q., Wan, C., Sun, J., Shen, J., Ye, H., Yu, Q.: Fundus image enhancement method based on CycleGAN. In: 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC), pp. 4500\u20134503. 
IEEE (2019)","DOI":"10.1109\/EMBC.2019.8856950"},{"key":"1_CR25","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-53302-0_1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T11:54:49Z","timestamp":1709812489000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-53302-0_1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031533013","9783031533020"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-53302-0_1","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"29 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Amsterdam","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"The Netherlands","order":4,"name":"conference_country","label":"Conference 
Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 February 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"112","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38% - The 
value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}