{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,5]],"date-time":"2026-05-05T13:22:04Z","timestamp":1777987324305,"version":"3.51.4"},"reference-count":51,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of 
China","doi-asserted-by":"publisher","award":["2022YFD2001704"],"award-info":[{"award-number":["2022YFD2001704"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Expert Systems with Applications"],"published-print":{"date-parts":[[2026,8]]},"DOI":"10.1016\/j.eswa.2026.132175","type":"journal-article","created":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T08:15:58Z","timestamp":1774599358000},"page":"132175","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["SAM2-WaveUNet: A frequency-enhanced segmentation network for fine-grained marine organism delineation"],"prefix":"10.1016","volume":"322","author":[{"given":"Shuzhou","family":"Lv","sequence":"first","affiliation":[]},{"given":"Shubin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiaoshuang","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Dong","family":"An","sequence":"additional","affiliation":[]},{"given":"Jincun","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Meng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4888-8558","authenticated-orcid":false,"given":"Yaoguang","family":"Wei","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.eswa.2026.132175_bib0001","unstructured":"Agarap, A. F. (2019). Deep learning using rectified linear units (reLU). 1803.08375."},{"key":"10.1016\/j.eswa.2026.132175_bib0002","unstructured":"Ball\u00e9, J., Laparra, V., & Simoncelli, E. P. (2016). Density modeling of images using a generalized normalization transformation. 
1511.06281."},{"key":"10.1016\/j.eswa.2026.132175_sbref0003","series-title":"TransUNet: Transformers make strong encoders for medical image segmentation","author":"Chen","year":"2021"},{"key":"10.1016\/j.eswa.2026.132175_bib0004","series-title":"ICASSP 2022 - 2022 IEEE international conference on acoustics, speech and signal processing (ICASSP)","first-page":"2629","article-title":"A robust object segmentation network for underwater scenes","author":"Chen","year":"2022"},{"key":"10.1016\/j.eswa.2026.132175_bib0005","doi-asserted-by":"crossref","unstructured":"Chen, T., Zhu, L., Ding, C., Cao, R., Wang, Y., Li, Z., Sun, L., Mao, P., & Zang, Y. (2023). Sam fails to segment anything? \u2013 Sam-adapter: Adapting sam in underperformed scenes: Camouflage, shadow, medical image segmentation, and more. 2304.09148.","DOI":"10.1109\/ICCVW60793.2023.00361"},{"issue":"9","key":"10.1016\/j.eswa.2026.132175_bib0006","doi-asserted-by":"crossref","DOI":"10.3390\/s25092642","article-title":"SECrackSeg: A high-accuracy crack segmentation network based on proposed unet with sam2 s-adapter and edge-aware attention","volume":"25","author":"Chen","year":"2025","journal-title":"Sensors"},{"issue":"11","key":"10.1016\/j.eswa.2026.132175_bib0007","doi-asserted-by":"crossref","first-page":"6595","DOI":"10.1109\/TCSVT.2023.3264442","article-title":"Bidirectional collaborative mentoring network for marine organism detection and beyond","volume":"33","author":"Cheng","year":"2023","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2026.132175_bib0008","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., & Houlsby, N. (2021). An image is worth 16x16 words: Transformers for image recognition at scale. 
2010.11929."},{"key":"10.1016\/j.eswa.2026.132175_bib0009","doi-asserted-by":"crossref","DOI":"10.1186\/s13173-021-00117-7","article-title":"Underwater image segmentation in the wild using deep learning","volume":"27","author":"Drews-Jr","year":"2021","journal-title":"Journal of the Brazilian Computer Society"},{"key":"10.1016\/j.eswa.2026.132175_bib0010","series-title":"2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"2774","article-title":"Camouflaged object detection","author":"Fan","year":"2020"},{"key":"10.1016\/j.eswa.2026.132175_bib0011","doi-asserted-by":"crossref","unstructured":"Fan, D.-P., Ji, G.-P., Zhou, T., Chen, G., Fu, H., Shen, J., & Shao, L. (2020b). PraNet: Parallel reverse attention network for polyp segmentation. 2006.11392.","DOI":"10.1007\/978-3-030-59725-2_26"},{"key":"10.1016\/j.eswa.2026.132175_bib0012","article-title":"TSCA-Net: Transformer based spatial-channel attention segmentation network for medical images","volume":"170","author":"Fu","year":"2024","journal-title":"Computers in Biology and Medicine"},{"issue":"3","key":"10.1016\/j.eswa.2026.132175_bib0013","doi-asserted-by":"crossref","first-page":"1104","DOI":"10.1109\/JOE.2023.3252760","article-title":"MASNet: A robust deep marine animal segmentation network","volume":"49","author":"Fu","year":"2024","journal-title":"IEEE Journal of Oceanic Engineering"},{"issue":"9","key":"10.1016\/j.eswa.2026.132175_bib0014","doi-asserted-by":"crossref","first-page":"2763","DOI":"10.1109\/TMI.2023.3264513","article-title":"H2Former: An efficient hierarchical hybrid transformer for medical image segmentation","volume":"42","author":"He","year":"2023","journal-title":"IEEE Transactions on Medical Imaging"},{"issue":"2","key":"10.1016\/j.eswa.2026.132175_bib0015","doi-asserted-by":"crossref","first-page":"386","DOI":"10.1109\/TPAMI.2018.2844175","article-title":"Mask r-CNN","volume":"42","author":"He","year":"2020","journal-title":"IEEE Transactions on 
Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.eswa.2026.132175_bib0016","series-title":"2016 IEEE conference on computer vision and pattern recognition (CVPR)","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.eswa.2026.132175_bib0017","series-title":"2018 IEEE\/CVF conference on computer vision and pattern recognition","first-page":"7132","article-title":"Squeeze-and-excitation networks","author":"Hu","year":"2018"},{"key":"10.1016\/j.eswa.2026.132175_bib0018","series-title":"2017 IEEE conference on computer vision and pattern recognition (CVPR)","first-page":"2261","article-title":"Densely connected convolutional networks","author":"Huang","year":"2017"},{"key":"10.1016\/j.eswa.2026.132175_bib0019","unstructured":"Islam, M. J., Luo, P., & Sattar, J. (2020). Simultaneous enhancement and super-resolution of underwater imagery for improved visual perception. 2002.01155."},{"issue":"1","key":"10.1016\/j.eswa.2026.132175_bib0020","doi-asserted-by":"crossref","DOI":"10.1080\/17538947.2025.2496790","article-title":"Semantic enhancement and change consistency network for semantic change detection in remote sensing images","volume":"18","author":"Jiang","year":"2025","journal-title":"International Journal of Digital Earth"},{"key":"10.1016\/j.eswa.2026.132175_bib0021","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W.-Y., Doll\u00e1r, P., & Girshick, R. (2023). Segment anything. 2304.02643.","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"10.1016\/j.eswa.2026.132175_bib0022","unstructured":"Kong, L., Dong, J., Yang, M.-H., & Pan, J. (2024). Efficient visual state space model for image deblurring. 
2405.14343."},{"key":"10.1016\/j.eswa.2026.132175_bib0023","series-title":"Biometric recognition","first-page":"180","article-title":"Detect any deepfakes: Segment anything meets face forgery detection and localization","author":"Lai","year":"2023"},{"issue":"4","key":"10.1016\/j.eswa.2026.132175_bib0024","doi-asserted-by":"crossref","first-page":"2303","DOI":"10.1109\/TCSVT.2021.3093890","article-title":"Marine animal segmentation","volume":"32","author":"Li","year":"2022","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.eswa.2026.132175_bib0025","series-title":"Benchmarking, measuring, and optimizing","first-page":"194","article-title":"Mas3k: An open dataset for marine animal segmentation","author":"Li","year":"2021"},{"key":"10.1016\/j.eswa.2026.132175_bib0026","first-page":"1","article-title":"Lightweight remote sensing change detection with progressive feature aggregation and supervised attention","volume":"61","author":"Li","year":"2023","journal-title":"IEEE Transactions on Geoscience and Remote Sensing"},{"key":"10.1016\/j.eswa.2026.132175_bib0027","series-title":"2022 ieee\/cvf winter conference on applications of computer vision (wacv)","first-page":"2613","article-title":"Modeling aleatoric uncertainty for camouflaged object detection","author":"Liu","year":"2022"},{"key":"10.1016\/j.eswa.2026.132175_bib0028","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2025.126912","article-title":"Seamless detection: Unifying salient object detection and camouflaged object detection","volume":"274","author":"Liu","year":"2025","journal-title":"Expert Systems with Applications"},{"issue":"7","key":"10.1016\/j.eswa.2026.132175_bib0029","doi-asserted-by":"crossref","first-page":"6633","DOI":"10.1109\/TITS.2023.3342811","article-title":"TCGNet: Type-correlation guidance for salient object detection","volume":"25","author":"Liu","year":"2024","journal-title":"IEEE Transactions on Intelligent Transportation 
Systems"},{"key":"10.1016\/j.eswa.2026.132175_bib0030","unstructured":"Loshchilov, I., & Hutter, F. (2019). Decoupled weight decay regularization. 1711.05101."},{"issue":"17","key":"10.1016\/j.eswa.2026.132175_bib0031","doi-asserted-by":"crossref","first-page":"33387","DOI":"10.1109\/JSEN.2025.3593256","article-title":"BG-UISNET: A novel boundary-guided network for robust and precise underwater image segmentation","volume":"25","author":"Lu","year":"2025","journal-title":"IEEE Sensors Journal"},{"key":"10.1016\/j.eswa.2026.132175_bib0032","series-title":"2021 IEEE\/CVF conference on computer vision and pattern recognition","first-page":"11586","article-title":"Simultaneously localize, segment and rank the camouflaged objects","author":"Lv","year":"2021"},{"key":"10.1016\/j.eswa.2026.132175_bib0033","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"8772","article-title":"Camouflaged object segmentation with distraction mining","author":"Mei","year":"2021"},{"key":"10.1016\/j.eswa.2026.132175_bib0034","doi-asserted-by":"crossref","unstructured":"Pang, Y., Zhao, X., Xiang, T.-Z., Zhang, L., & Lu, H. (2022). Zoom in and out: A mixed-scale triplet network for camouflaged object detection. 
arXiv: 2203.02688.","DOI":"10.1109\/CVPR52688.2022.00220"},{"key":"10.1016\/j.eswa.2026.132175_bib0035","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2020.107404","article-title":"U2-Net: Going deeper with nested U-structure for salient object detection","volume":"106","author":"Qin","year":"2020","journal-title":"Pattern Recognition"},{"key":"10.1016\/j.eswa.2026.132175_bib0036","series-title":"IEEE\/CVF conference on computer vision and pattern recognition (cvpr)","first-page":"7471","article-title":"BASNet: Boundary-aware salient object detection","author":"Qin","year":"2019"},{"key":"10.1016\/j.eswa.2026.132175_bib0037","unstructured":"Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., R\u00e4dle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. V., Carion, N., Wu, C.-Y., Girshick, R., Doll\u00e1r, P., & Feichtenhofer, C. (2024). Sam 2: Segment anything in images and videos. 2408.00714."},{"key":"10.1016\/j.eswa.2026.132175_bib0038","series-title":"Medical image computing and computer-assisted intervention \u2013 MICCAI 2015","first-page":"234","article-title":"U-net: Convolutional networks for biomedical image segmentation","author":"Ronneberger","year":"2015"},{"key":"10.1016\/j.eswa.2026.132175_bib0039","series-title":"2017 IEEE international conference on computer vision (ICCV)","first-page":"618","article-title":"Grad-CAM: Visual explanations from deep networks via gradient-based localization","author":"Selvaraju","year":"2017"},{"key":"10.1016\/j.eswa.2026.132175_sbref0040","article-title":"Context-aware cross-level fusion network for camouflaged object detection","author":"Sun","year":"2021","journal-title":"CoRR"},{"key":"10.1016\/j.eswa.2026.132175_bib0041","series-title":"IEEE\/CVF international conference on computer vision (ICCV)","first-page":"7263","article-title":"Stacked cross refinement network for edge-aware salient object 
detection","author":"Wu","year":"2019"},{"key":"10.1016\/j.eswa.2026.132175_bib0042","series-title":"Computer vision \u2013 ECCV 2024","first-page":"90","article-title":"I-medSAM: Implicit medical image segmentation with segment anything","author":"Wei","year":"2025"},{"key":"10.1016\/j.eswa.2026.132175_bib0043","doi-asserted-by":"crossref","DOI":"10.1016\/j.media.2025.103547","article-title":"Medical sam adapter: Adapting segment anything model for medical image segmentation","volume":"102","author":"Wu","year":"2025","journal-title":"Medical Image Analysis"},{"key":"10.1016\/j.eswa.2026.132175_bib0044","unstructured":"Xiong, X., Wu, Z., Tan, S., Li, W., Tang, F., Chen, Y., Li, S., Ma, J., & Li, G. (2024). Sam2-unet: Segment anything 2 makes strong encoder for natural and medical image segmentation. arXiv: 2408.08870."},{"key":"10.1016\/j.eswa.2026.132175_bib0045","series-title":"Proceedings of the thirty-third international joint conference on artificial intelligence, IJCAI-24","first-page":"6886","article-title":"MAS-SAM: Segment any marine animal with aggregated features","author":"Yan","year":"2024"},{"key":"10.1016\/j.eswa.2026.132175_bib0046","series-title":"2024 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"2578","article-title":"Fantastic animals and where to find them: Segment any marine animal with dual SAM","author":"Zhang","year":"2024"},{"key":"10.1016\/j.eswa.2026.132175_bib0047","series-title":"Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"2578","article-title":"Fantastic animals and where to find them: Segment any marine animal with dual SAM","author":"Zhang","year":"2024"},{"key":"10.1016\/j.eswa.2026.132175_bib0048","unstructured":"Zhao, Q., Zhang, X., Tang, H., Gu, C., & Zhu, S. (2023). Enlighten anything: When segment anything model meets low-light image enhancement. 
2306.10286."},{"key":"10.1016\/j.eswa.2026.132175_bib0049","series-title":"IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"3080","article-title":"Pyramid feature attention network for saliency detection","author":"Zhao","year":"2019"},{"key":"10.1016\/j.eswa.2026.132175_bib0050","series-title":"2021 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","first-page":"6877","article-title":"Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers","author":"Zheng","year":"2021"},{"key":"10.1016\/j.eswa.2026.132175_bib0051","first-page":"3","article-title":"UNet++: A nested U-net architecture for medical image segmentation","author":"Zhou","year":"2018"}],"container-title":["Expert Systems with Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417426010882?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0957417426010882?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,5,5]],"date-time":"2026-05-05T12:32:24Z","timestamp":1777984344000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0957417426010882"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,8]]},"references-count":51,"alternative-id":["S0957417426010882"],"URL":"https:\/\/doi.org\/10.1016\/j.eswa.2026.132175","relation":{},"ISSN":["0957-4174"],"issn-type":[{"value":"0957-4174","type":"print"}],"subject":[],"published":{"date-parts":[[2026,8]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"SAM2-WaveUNet: A frequency-enhanced segmentation network for fine-grained marine organism 
delineation","name":"articletitle","label":"Article Title"},{"value":"Expert Systems with Applications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.eswa.2026.132175","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"132175"}}