{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,30]],"date-time":"2026-01-30T07:09:56Z","timestamp":1769756996122,"version":"3.49.0"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Jiangsu Petrochemical Process Key Equipment Digital Twin Technology Engineering Research Center Open Project","award":["DTEC202103"],"award-info":[{"award-number":["DTEC202103"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Earth Sci Inform"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s12145-024-01679-8","type":"journal-article","created":{"date-parts":[[2025,1,3]],"date-time":"2025-01-03T00:23:18Z","timestamp":1735863798000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["PAR-YOLO: a precise and real-time YOLO water surface garbage detection model"],"prefix":"10.1007","volume":"18","author":[{"given":"Ning","family":"Li","sequence":"first","affiliation":[]},{"given":"Mingliang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"He","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[]},{"given":"Baohua","family":"Yuan","sequence":"additional","affiliation":[]},{"given":"Shoukun","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,2]]},"reference":[{"key":"1679_CR1","unstructured":"Redmon J, Farhadi A (2018) Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767"},{"key":"1679_CR2","unstructured":"Bochkovskiy A, Wang CY, Liao HYM (2020) Yolov4: Optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934"},{"key":"1679_CR3","unstructured":"Ge Z, Liu S, Wang F, Li Z, Sun J (2021) Yolox: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430"},{"key":"1679_CR4","doi-asserted-by":"crossref","unstructured":"Liu W, Anguelov D, Erhan D, Szegedy C, Reed S, Fu CY, Berg AC (2016) Ssd: Single shot multibox detector. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11\u201314, 2016, Proceedings, Part I 14, pp 21\u201337. Springer","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"1679_CR5","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Van Gool L, Williams CK, Winn J, Zisserman A (2010) The pascal visual object classes (voc) challenge. Int J Comput Vis 88:303\u2013338","journal-title":"Int J Comput Vis"},{"key":"1679_CR6","doi-asserted-by":"crossref","unstructured":"Cai Z, Vasconcelos N (2018) Cascade r-cnn: Delving into high quality object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 6154\u20136162","DOI":"10.1109\/CVPR.2018.00644"},{"key":"1679_CR7","doi-asserted-by":"crossref","unstructured":"Zhang H, Chang H, Ma B, Wang N, Chen X (2020) Dynamic r-cnn: Towards high quality object detection via dynamic training. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XV 16, pp 260\u2013275. Springer","DOI":"10.1007\/978-3-030-58555-6_16"},{"key":"1679_CR8","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. Adv Neural Inf Process Syst 30"},{"key":"1679_CR9","doi-asserted-by":"crossref","unstructured":"Carion N, Massa F, Synnaeve G, Usunier N, Kirillov A, Zagoruyko S (2020) End-to-end object detection with transformers. In: European Conference on Computer Vision, pp 213\u2013229. Springer","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"1679_CR10","unstructured":"Zhu X, Su W, Lu L, Li B, Wang X, Dai J (2020) Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159"},{"key":"1679_CR11","unstructured":"Zhang H, Li F, Liu S, Zhang L, Su H, Zhu J, Ni L.M, Shum HY (2022) Dino: Detr with improved denoising anchor boxes for end-to-end object detection. arXiv preprint arXiv:2203.03605"},{"key":"1679_CR12","first-page":"1","volume":"60","author":"L Wen","year":"2021","unstructured":"Wen L, Ding J, Xu Z (2021) Multiframe detection of sea-surface small target using deep convolutional neural network. IEEE Trans Geosci Remote Sens 60:1\u201316","journal-title":"IEEE Trans Geosci Remote Sens"},{"key":"1679_CR13","doi-asserted-by":"publisher","first-page":"81147","DOI":"10.1109\/ACCESS.2021.3085348","volume":"9","author":"L Zhang","year":"2021","unstructured":"Zhang L, Wei Y, Wang H, Shao Y, Shen J (2021) Real-time detection of river surface floating object based on improved refinedet. IEEE Access 9:81147\u201381160","journal-title":"IEEE Access"},{"key":"1679_CR14","doi-asserted-by":"crossref","unstructured":"Zhang S, Wen L, Bian X, Lei Z, Li SZ (2018) Single-shot refinement neural network for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4203\u20134212","DOI":"10.1109\/CVPR.2018.00442"},{"key":"1679_CR15","doi-asserted-by":"crossref","unstructured":"Zhang H, Wang Y, Dayoub F, Sunderhauf N (2021) Varifocalnet: An iou-aware dense object detector. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 8514\u20138523","DOI":"10.1109\/CVPR46437.2021.00841"},{"issue":"22","key":"1679_CR16","doi-asserted-by":"publisher","first-page":"4366","DOI":"10.3390\/math10224366","volume":"10","author":"X Yang","year":"2022","unstructured":"Yang X, Zhao J, Zhao L, Zhang H, Li L, Ji Z, Ganchev I (2022) Detection of river floating garbage based on improved yolov5. Math 10(22):4366","journal-title":"Math"},{"issue":"1","key":"1679_CR17","first-page":"154","volume":"3","author":"N Yi","year":"2023","unstructured":"Yi N, Luo W (2023) Research on water garbage detection algorithm based on gfl network. Front Comput Intel Syst 3(1):154\u2013157","journal-title":"Front Comput Intel Syst"},{"key":"1679_CR18","doi-asserted-by":"crossref","unstructured":"Pang Y, Qin B (2021) Gdt-net: A model based on deep learning for water surface garbage detection. In: Proceedings of the 2021 5th International Conference on Deep Learning Technologies, pp 1\u20137","DOI":"10.1145\/3480001.3480014"},{"key":"1679_CR19","doi-asserted-by":"crossref","unstructured":"Cheng Y, Zhu J, Jiang M, Fu J, Pang C, Wang P, Sankaran K, Onabola O, Liu Y, Liu D et\u00a0al (2021) Flow: A dataset and benchmark for floating waste detection in inland waters. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 10953\u201310962","DOI":"10.1109\/ICCV48922.2021.01077"},{"key":"1679_CR20","doi-asserted-by":"crossref","unstructured":"Ma L, Wu B, Deng J, Lian J (2023) Small-target water-floating garbage detection and recognition based on unet-yolov5s. In: 2023 5th International Conference on Communications, Information System and Computer Engineering (CISCE), pp 391\u2013395. IEEE","DOI":"10.1109\/CISCE58541.2023.10142409"},{"key":"1679_CR21","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pp 234\u2013241. Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"1679_CR22","unstructured":"Howard AG, Zhu M, Chen B, Kalenichenko D, Wang W, Weyand T, Andreetto M, Adam H (2017) Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861"},{"key":"1679_CR23","doi-asserted-by":"crossref","unstructured":"Sandler M, Howard A, Zhu M, Zhmoginov A, Chen LC (2018) Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4510\u20134520","DOI":"10.1109\/CVPR.2018.00474"},{"key":"1679_CR24","doi-asserted-by":"crossref","unstructured":"Howard A, Sandler M, Chu G, Chen LC, Chen B, Tan M, Wang W, Zhu Y, Pang R, Vasudevan V et\u00a0al (2019) Searching for mobilenetv3. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 1314\u20131324","DOI":"10.1109\/ICCV.2019.00140"},{"key":"1679_CR25","doi-asserted-by":"crossref","unstructured":"Zhang X, Zhou X, Lin M, Sun J (2018) Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 6848\u20136856","DOI":"10.1109\/CVPR.2018.00716"},{"key":"1679_CR26","doi-asserted-by":"crossref","unstructured":"Ma N, Zhang X, Zheng HT, Sun J (2018) Shufflenet v2: Practical guidelines for efficient cnn architecture design. In: Proceedings of the European Conference on Computer Vision (ECCV), pp 116\u2013131","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"1679_CR27","first-page":"1","volume":"60","author":"L Wen","year":"2021","unstructured":"Wen L, Ding J, Xu Z (2021) Multiframe detection of sea-surface small target using deep convolutional neural network. IEEE Trans Geosci Remote Sens 60:1\u201316","journal-title":"IEEE Trans Geosci Remote Sens"},{"key":"1679_CR28","doi-asserted-by":"crossref","unstructured":"Han K, Wang Y, Tian Q, Guo J, Xu C, Xu C (2020) Ghostnet: More features from cheap operations. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 1580\u20131589","DOI":"10.1109\/CVPR42600.2020.00165"},{"issue":"22","key":"1679_CR29","doi-asserted-by":"publisher","first-page":"4366","DOI":"10.3390\/math10224366","volume":"10","author":"X Yang","year":"2022","unstructured":"Yang X, Zhao J, Zhao L, Zhang H, Li L, Ji Z, Ganchev I (2022) Detection of river floating garbage based on improved yolov5. Math 10(22):4366","journal-title":"Math"},{"issue":"1","key":"1679_CR30","first-page":"154","volume":"3","author":"N Yi","year":"2023","unstructured":"Yi N, Luo W (2023) Research on water garbage detection algorithm based on gfl network. Front Comput Intel Syst 3(1):154\u2013157","journal-title":"Front Comput Intel Syst"},{"key":"1679_CR31","doi-asserted-by":"publisher","first-page":"81147","DOI":"10.1109\/ACCESS.2021.3085348","volume":"9","author":"L Zhang","year":"2021","unstructured":"Zhang L, Wei Y, Wang H, Shao Y, Shen J (2021) Real-time detection of river surface floating object based on improved refinedet. IEEE Access 9:81147\u201381160","journal-title":"IEEE Access"},{"key":"1679_CR32","doi-asserted-by":"crossref","unstructured":"Hou Q, Zhou D, Feng J (2021) Coordinate attention for efficient mobile network design. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 13713\u201313722","DOI":"10.1109\/CVPR46437.2021.01350"},{"key":"1679_CR33","doi-asserted-by":"crossref","unstructured":"Misra D, Nalamada T, Arasanipalai AU, Hou Q (2021) Rotate to attend: Convolutional triplet attention module. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp 3139\u20133148","DOI":"10.1109\/WACV48630.2021.00318"},{"key":"1679_CR34","doi-asserted-by":"crossref","unstructured":"Lin TY, Goyal P, Girshick R, He K, Doll\u00e1r P (2017) Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp 2980\u20132988","DOI":"10.1109\/ICCV.2017.324"},{"key":"1679_CR35","doi-asserted-by":"crossref","unstructured":"Lin TY, Maire M, Belongie S, Hays J, Perona P, Ramanan D, Doll\u00e1r P, Zitnick CL (2014) Microsoft coco: Common objects in context. In: Computer Vision\u2013ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pp 740\u2013755. Springer","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1679_CR36","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Van Gool L, Williams CK, Winn J, Zisserman A (2010) The pascal visual object classes (voc) challenge. Int J Comput Vis 88:303\u2013338","journal-title":"Int J Comput Vis"},{"key":"1679_CR37","doi-asserted-by":"publisher","unstructured":"Ultralytics (2020) ultralytics\/yolov5. https:\/\/github.com\/ultralytics\/yolov5.com, https:\/\/doi.org\/10.5281\/zenodo.7347926. Accessed: 26 Jun 2020","DOI":"10.5281\/zenodo.7347926"}],"container-title":["Earth Science Informatics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12145-024-01679-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s12145-024-01679-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12145-024-01679-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,26]],"date-time":"2025-04-26T08:05:47Z","timestamp":1745654747000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s12145-024-01679-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1]]},"references-count":37,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["1679"],"URL":"https:\/\/doi.org\/10.1007\/s12145-024-01679-8","relation":{},"ISSN":["1865-0473","1865-0481"],"issn-type":[{"value":"1865-0473","type":"print"},{"value":"1865-0481","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1]]},"assertion":[{"value":"13 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 December 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 January 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors of this research paper declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of Interest"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"135"}}