{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,17]],"date-time":"2026-01-17T20:44:57Z","timestamp":1768682697886,"version":"3.49.0"},"reference-count":29,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2020,10,21]],"date-time":"2020-10-21T00:00:00Z","timestamp":1603238400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,10,21]],"date-time":"2020-10-21T00:00:00Z","timestamp":1603238400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"name":"National Key R&D Program of China","award":["No. 2018YFC0810500"],"award-info":[{"award-number":["No. 2018YFC0810500"]}]},{"name":"Scientific and Technological Innovation Foundation of Shunde Graduate School, USTB","award":["No. BK19BE003"],"award-info":[{"award-number":["No. BK19BE003"]}]},{"name":"the Fundamental Research Funds for the Central Universities","award":["No. RF-TP-20-009A3"],"award-info":[{"award-number":["No. RF-TP-20-009A3"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2021,4]]},"DOI":"10.1007\/s10489-020-01931-w","type":"journal-article","created":{"date-parts":[[2020,10,21]],"date-time":"2020-10-21T23:08:45Z","timestamp":1603321725000},"page":"1947-1958","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":31,"title":["Local-CycleGAN: a general end-to-end network for visual enhancement in complex deep-water environment"],"prefix":"10.1007","volume":"51","author":[{"given":"Xianhui","family":"Zong","sequence":"first","affiliation":[]},{"given":"Zhehan","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Dadong","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,10,21]]},"reference":[{"key":"1931_CR1","unstructured":"He KM, Sun J, Tang X (2009) Single image haze removal using Dark Channel prior. IEEE Trans Pattern Anal Mach Intell 33(12):1956\u20131963"},{"key":"1931_CR2","doi-asserted-by":"publisher","first-page":"108","DOI":"10.1038\/scientificamerican1277-108","volume":"237","author":"EH Land","year":"1977","unstructured":"Land EH (1977) The Retinex theory of color vision. Sci Am 237:108\u2013128","journal-title":"Sci Am"},{"key":"1931_CR3","doi-asserted-by":"publisher","first-page":"266","DOI":"10.5201\/ipol.2012.g-ace","volume":"2","author":"P Getreuer","year":"2012","unstructured":"Getreuer P (2012) Automatic color enhancement (ACE) and its fast implementation. Image Process Line 2:266\u2013277","journal-title":"Image Process Line"},{"key":"1931_CR4","unstructured":"Lan G, Jean P-A, Mehdi M, et al, 2014. Generative adversarial networks[J]. arXiv:1406.2661"},{"key":"1931_CR5","doi-asserted-by":"crossref","unstructured":"Zhu JY, Park T, Isola P et al (2017) Unpaired image-to-image translation using cycle-consistent adversarial networks.\u00a0arXiv:1703.10593","DOI":"10.1109\/ICCV.2017.244"},{"key":"1931_CR6","unstructured":"Yan XC (2009) A new method for underwater image enhancement based on local complexity. Modern Manuf Eng (12):101\u2013103"},{"issue":"06","key":"1931_CR7","first-page":"197","volume":"32","author":"WZ Yang","year":"2016","unstructured":"Yang WZ, Xu YL, Qiao X et al (2016) Method for image intensification of Underwater Sea cucumber based on contrast-limited adaptive histogram equalization. Trans Chin Soc Agric Eng 32(06):197\u2013203","journal-title":"Trans Chin Soc Agric Eng"},{"key":"1931_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.neucom.2017.03.029","volume":"245","author":"S Zhang","year":"2017","unstructured":"Zhang S, Wang T, Dong JY et al (2017) Underwater image enhancement via extended multi-scale Retinex. Neurocomputing. 245:1\u20139","journal-title":"Neurocomputing."},{"key":"1931_CR9","doi-asserted-by":"crossref","unstructured":"Javier P, Mitch B, Stefan BW et al (2020) Recovering depth from still images for underwater Dehazing using deep learning. Sensors 20(16):4580","DOI":"10.3390\/s20164580"},{"key":"1931_CR10","doi-asserted-by":"crossref","unstructured":"Ho SL, Sang WM et al (2020) Underwater image enhancement using successive color correction and Superpixel Dark Channel prior. Symmetry 12(8):1220","DOI":"10.3390\/sym12081220"},{"issue":"12","key":"1931_CR11","doi-asserted-by":"publisher","first-page":"5664","DOI":"10.1109\/TIP.2016.2612882","volume":"25","author":"CY Li","year":"2016","unstructured":"Li CY, Guo JC, Cong RM, Pang YW, Wang B (2016) Underwater image enhancement by Dehazing with minimum information loss and histogram distribution prior. IEEE Trans Image Process 25(12):5664\u20135677","journal-title":"IEEE Trans Image Process"},{"key":"1931_CR12","doi-asserted-by":"crossref","unstructured":"Zhang MH, Peng JH (2018) Underwater image restoration based on a new underwater image formation model IEEE access 6:58634\u201358644","DOI":"10.1109\/ACCESS.2018.2875344"},{"key":"1931_CR13","doi-asserted-by":"crossref","unstructured":"Ma XM, Chen ZH, Feng ZP 2019 Underwater image restoration through a combination of improved Dark Channel prior and gray world algorithms. J Electron Imaging 28(5)","DOI":"10.1117\/1.JEI.28.5.053033"},{"issue":"09","key":"1931_CR14","first-page":"856","volume":"31","author":"W Song","year":"2018","unstructured":"Song W, Wang Y, Huang DM et al (2018) Combining background light fusion and underwater Dark Channel prior with color balancing for underwater. Pattern Recogn Artificial Intell 31(09):856\u2013868","journal-title":"Pattern Recogn Artificial Intell"},{"issue":"2","key":"1931_CR15","first-page":"222","volume":"40","author":"ZQ Tang","year":"2018","unstructured":"Tang ZQ, Zhou B, Dai XZ (2018) Underwater robot visual enhancements based on the improved DCP algorithm. Robot 40(2):222\u2013230","journal-title":"Robot"},{"key":"1931_CR16","doi-asserted-by":"crossref","unstructured":"Yang SD, Chen ZH, Feng ZP 2019 Underwater Image Enhancement Using Scene Depth-Based Adaptive Background Light Estimation and Dark Channel Prior Algorithms IEEE Access","DOI":"10.1109\/ACCESS.2019.2953463"},{"key":"1931_CR17","doi-asserted-by":"publisher","first-page":"20373","DOI":"10.1007\/s11042-020-08701-3","volume":"79","author":"H Yu","year":"2020","unstructured":"Yu H, Li X, Lou Q, Lei C, Liu Z (2020) Underwater image enhancement based on DCP and depth transmission map. Multimed Tools Appl 79:20373\u201320390","journal-title":"Multimed Tools Appl"},{"key":"1931_CR18","doi-asserted-by":"crossref","unstructured":"Jin WP, Guo JC, Qi Q (2019) Underwater image enhancement based on conditional generative adversarial network. Signal Processing: Image Communication 81","DOI":"10.1016\/j.image.2019.115723"},{"key":"1931_CR19","doi-asserted-by":"publisher","first-page":"94614","DOI":"10.1109\/ACCESS.2019.2928976","volume":"7","author":"P Liu","year":"2019","unstructured":"Liu P, Wang GY, Qi H et al (2019) Underwater image enhancement with a deep residual framework. IEEE Access 7:94614\u201394629","journal-title":"IEEE Access"},{"key":"1931_CR20","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1016\/j.optlastec.2018.05.048","volume":"110","author":"JY Lu","year":"2019","unstructured":"Lu JY, Li N, Zhang AY et al (2019) Multi-scale adversarial network for underwater image restoration. Opt Laser Technol 110:105\u2013113","journal-title":"Opt Laser Technol"},{"key":"1931_CR21","doi-asserted-by":"crossref","unstructured":"Fu XY, Cao XY 2020 Underwater image enhancement with global\u2013local networks and compressed-histogram equalization. Signal Process: Image Commun 86","DOI":"10.1016\/j.image.2020.115892"},{"key":"1931_CR22","doi-asserted-by":"crossref","unstructured":"Li CY, Anwar S, Porikli F 2020 Underwater Scene Prior Inspired Deep Underwater Image and Video Enhancement. Pattern Recogn 98","DOI":"10.1016\/j.patcog.2019.107038"},{"key":"1931_CR23","unstructured":"Radford A Metz L, Chintala S (2016) Unsupervised representation learning with deep convolutional generative adversarial networks. The International Conference on Learning Representations 10667:97\u2013108"},{"key":"1931_CR24","doi-asserted-by":"crossref","unstructured":"Li J, Skinner K, Eustice R et al (2018) WaterGAN: unsupervised generative network to enable real-time color correction of monocular underwater images. IEEE Robotics Automation Letters\u00a03(1):387\u2013394","DOI":"10.1109\/LRA.2017.2730363"},{"key":"1931_CR25","unstructured":"Mirza M, Osindero S, 2014 Conditional generative adversarial nets. arXiv:1411.1784"},{"key":"1931_CR26","unstructured":"Chen X, Duan Y, Houthooft R et al (2016) InfoGAN: interpretable representation learning by information maximizing generative adversarial nets.\u00a0arXiv:1606.03657"},{"key":"1931_CR27","doi-asserted-by":"crossref","unstructured":"Li T, Qian RH, Chao D et al. 2018 BeautyGAN: instance-level facial makeup transfer with deep generative adversarial network. ACM Multimedia Conference","DOI":"10.1145\/3240508.3240618"},{"key":"1931_CR28","unstructured":"Junho K, Minjae K, Hyeonwoo K et al 2019 U-GAT-IT: unsupervised generative Attentional networks with adaptive layer-instance normalization for image-to-image translation. arXiv:1907.10830"},{"key":"1931_CR29","doi-asserted-by":"crossref","unstructured":"Huang X., Liu M.Y., Belongie S., et al., 2018. Multimodal unsupervised image-to-image translation. European Conference on Computer Vision, Multimodal Unsupervised Image-to-Image Translation","DOI":"10.1007\/978-3-030-01219-9_11"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-020-01931-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-020-01931-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-020-01931-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,10,20]],"date-time":"2021-10-20T23:46:44Z","timestamp":1634773604000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-020-01931-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,21]]},"references-count":29,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2021,4]]}},"alternative-id":["1931"],"URL":"https:\/\/doi.org\/10.1007\/s10489-020-01931-w","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,10,21]]},"assertion":[{"value":"5 September 2020","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 October 2020","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Compliance with ethical standards"}},{"value":"This research was funded by the National Key R&D Program of China (Grant No. 2018YFC0810500) and the Scientific, the Fundamental Research Funds for the Central Universities (Grant No. RF-TP-20-009A3) and Technological Innovation Foundation of Shunde Graduate School, USTB (Grant No. BK19BE003).The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}}]}}