{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T18:58:09Z","timestamp":1743101889882,"version":"3.40.3"},"publisher-location":"Cham","reference-count":10,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031505737"},{"type":"electronic","value":"9783031505744"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-50574-4_14","type":"book-chapter","created":{"date-parts":[[2024,2,20]],"date-time":"2024-02-20T08:02:33Z","timestamp":1708416153000},"page":"200-211","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Automatic Focus Fusion Method of Concrete Crack Image Based on Deep Learning"],"prefix":"10.1007","author":[{"given":"Chuang","family":"Wang","sequence":"first","affiliation":[]},{"given":"Jiawei","family":"Pang","sequence":"additional","affiliation":[]},{"given":"Xiaolu","family":"Deng","sequence":"additional","affiliation":[]},{"given":"Yangjie","family":"Xia","sequence":"additional","affiliation":[]},{"given":"Ruiyang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Caihui","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,2,21]]},"reference":[{"issue":"09","key":"14_CR1","first-page":"469","volume":"37","author":"H Zhou","year":"2020","unstructured":"Zhou, H., Zhao, L.H., Liu, H.: Research on image restoration methods for global optimization of damaged areas. Comput. Simul. 37(09), 469\u2013473 (2020)","journal-title":"Comput. Simul."},{"issue":"S2","key":"14_CR2","first-page":"315","volume":"46","author":"YP Mao","year":"2019","unstructured":"Mao, Y.P., Yu, L., Guan, Z.J.: Multi-focus image fusion based on fractional differential. Comput. Sci. 46(S2), 315\u2013319 (2019)","journal-title":"Comput. Sci."},{"issue":"09","key":"14_CR3","first-page":"2293","volume":"42","author":"YC Wu","year":"2020","unstructured":"Wu, Y.C., Wang, Y.M., Wang, A.H.: Light field all-in-focus image fusion based on edge enhanced guided filtering. J. Electron. Inf. Technol. 42(09), 2293\u20132301 (2020)","journal-title":"J. Electron. Inf. Technol."},{"key":"14_CR4","unstructured":"Zhai, H., Zhuang, Y.: Multi-focus image fusion method using energy of Laplacian and convolutional neural network. J. Harbin Inst. Technol. 52(05), 137\u2013147(2020)"},{"issue":"03","key":"14_CR5","doi-asserted-by":"publisher","first-page":"278","DOI":"10.3788\/YJYXS20193403.0278","volume":"34","author":"DD Zhao","year":"2019","unstructured":"Zhao, D.D., Ji, Y.Q.: Multi-focus image fusion combining regional variance and EAV. Chinese J. Liq. Cryst. Displays 34(03), 278\u2013282 (2019)","journal-title":"Chinese J. Liq. Cryst. Displays"},{"issue":"05","key":"14_CR6","first-page":"1453","volume":"40","author":"ZX Zeng","year":"2020","unstructured":"Zeng, Z.X., Liu, J.: Microscopic image segmentation method of C.elegans based on deep learning. J. Comput. Appl. 40(05), 1453\u20131459 (2020)","journal-title":"J. Comput. Appl."},{"issue":"03","key":"14_CR7","first-page":"180","volume":"56","author":"J Cao","year":"2020","unstructured":"Cao, J., Chen, H., Zhang, J.W.: Research on multi-focus image fusion algorithm based on super resolution. Comput. Eng. Appl. 56(03), 180\u2013186 (2020)","journal-title":"Comput. Eng. Appl."},{"issue":"03","key":"14_CR8","doi-asserted-by":"publisher","first-page":"500","DOI":"10.1364\/AO.380551","volume":"41","author":"QJ Chen","year":"2020","unstructured":"Chen, Q.J., Wang, Z.B., Chai, Y.Z.: Multi-focus image fusion method based on improved VGG network. J. Appl. Opt. 41(03), 500\u2013507 (2020)","journal-title":"J. Appl. Opt."},{"issue":"09","key":"14_CR9","first-page":"1848","volume":"45","author":"YX Xie","year":"2019","unstructured":"Xie, Y.X., Wu, Y.C., Wang, Y.M.: Light field all-in-focus image fusion based on wavelet domain sharpness evaluation. J. Beijing Univ. Aeronaut. Astronaut. 45(09), 1848\u20131854 (2019)","journal-title":"J. Beijing Univ. Aeronaut. Astronaut."},{"issue":"09","key":"14_CR10","doi-asserted-by":"publisher","first-page":"1283","DOI":"10.37188\/CJLCD.2020-0339","volume":"36","author":"B Liu","year":"2021","unstructured":"Liu, B., Han, G.L., Luo, H.Y.: Twin convolution neural network image fusion algorithm based on multi-scale details. Liq. Cryst. Disp. 36(09), 1283\u20131293 (2021)","journal-title":"Liq. Cryst. Disp."}],"container-title":["Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering","Multimedia Technology and Enhanced Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-50574-4_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,20]],"date-time":"2024-02-20T08:07:56Z","timestamp":1708416476000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-50574-4_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031505737","9783031505744"],"references-count":10,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-50574-4_14","relation":{},"ISSN":["1867-8211","1867-822X"],"issn-type":[{"type":"print","value":"1867-8211"},{"type":"electronic","value":"1867-822X"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"21 February 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICMTEL","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Technology and Enhanced Learning","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Leicester","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 April 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 April 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icmtel2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icmtel.eai-conferences.org\/2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Confy Plus","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"285","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"121","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"42% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.1","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"6.5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}