{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T14:10:15Z","timestamp":1771855815095,"version":"3.50.1"},"reference-count":50,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Real-Time Image Proc"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s11554-025-01667-1","type":"journal-article","created":{"date-parts":[[2025,4,3]],"date-time":"2025-04-03T19:19:45Z","timestamp":1743707985000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["L-DEYO: An optimized lightweight model for intelligent coal gangue recognition"],"prefix":"10.1007","volume":"22","author":[{"given":"Sitong","family":"Yan","sequence":"first","affiliation":[]},{"given":"Wei","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ziyi","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Enqi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yasheng","family":"Chang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,4,1]]},"reference":[{"key":"1667_CR1","doi-asserted-by":"publisher","first-page":"112","DOI":"10.1016\/j.minpro.2011.11.004","volume":"102","author":"M Yang","year":"2012","unstructured":"Yang, M., Guo, Z., Deng, Y., Xing, X., Qiu, K., Long, J., Li, J.: Preparation of cao\u2013al2o3\u2013sio2 glass ceramics from coal gangue. Int. J. Mineral Pro-cessing 102, 112\u2013115 (2012)","journal-title":"Int. J. Mineral Pro-cessing"},{"issue":"6","key":"1667_CR2","doi-asserted-by":"publisher","first-page":"14091","DOI":"10.1007\/s11356-022-24866-w","volume":"30","author":"B Xue","year":"2023","unstructured":"Xue, B., Zhang, Y., Li, J., Wang, Y.: A review of coal gangue identification research\u2014application to china\u2019s top coal release process. Env-iron. Sci. Pollution Res 30(6), 14091\u201314103 (2023)","journal-title":"Env-iron. Sci. Pollution Res"},{"issue":"12","key":"1667_CR3","first-page":"20","volume":"21","author":"X Qi","year":"2002","unstructured":"Qi, X., Zhang, Y.: The automatic sorting machine of gangue operated by fuzzy mode reco-gnition and fuzzy control. Process Autom. Instrum. 21(12), 20\u201322 (2002)","journal-title":"Process Autom. Instrum."},{"issue":"10","key":"1667_CR4","first-page":"3636","volume":"45","author":"M Li","year":"2020","unstructured":"Li, M., Duan, Y., Cao, X., Liu, C., Sun, K., Liu, H.: Image identification method and system for coal and gangue sorting robot. J. China Coal Soc. 45(10), 3636\u20133644 (2020)","journal-title":"J. China Coal Soc."},{"issue":"169697\u2013169704","key":"1667_CR5","first-page":"2","volume":"7","author":"F Hu","year":"2019","unstructured":"Hu, F., Zhou, M., Yan, P., Bian, K., Dai, R.: Mul-tispectral imaging: A new solution for identification of coal and gangue. IEEE Access 7(169697\u2013169704), 2 (2019)","journal-title":"IEEE Access"},{"key":"1667_CR6","doi-asserted-by":"crossref","unstructured":"L. Su, X. Cao, H. Ma, and Y. Li. Research on coal gangue identification by using convolutional neural network. In 2018 2nd IEEE Advanced Information Management, Communications, Electronic and Au-tomation Control Conference (IMCEC), pages 810\u2013814. IEEE, 2018. 2","DOI":"10.1109\/IMCEC.2018.8469674"},{"key":"1667_CR7","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770\u2013778, 2016. 2","DOI":"10.1109\/CVPR.2016.90"},{"key":"1667_CR8","unstructured":"X. Zhu, W. Su, L. Lu, B. Li, X. Wang, and J. Dai. Deformable detr: Deformable transformers for end-to-end object detection. In Proceedings of the Inter-national Conferenceon Learning Representations (ICLR), 2021. 2"},{"issue":"50\u201361","key":"1667_CR9","first-page":"2","volume":"25","author":"L Wang","year":"2021","unstructured":"Wang, L., et al.: Eapt: Efficient attention pyramid tran-sformer for image processing. IEEE Trans. Multimedia 25(50\u201361), 2 (2021)","journal-title":"IEEE Trans. Multimedia"},{"key":"1667_CR10","doi-asserted-by":"crossref","unstructured":"J. Redmon, S. Divvala, R. Girshick, and A. Farhadi. You only look once: Unified real-time object detec-tion. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 779\u2013788, 2016. 2,3,4","DOI":"10.1109\/CVPR.2016.91"},{"issue":"77599\u201377610","key":"1667_CR11","first-page":"2","volume":"9","author":"Q Liu","year":"2021","unstructured":"Liu, Q., Li, J., Li, Y., Gao, M.: Recognition met-hods for coal and coal gangue based on deep learning. IEEE Access 9(77599\u201377610), 2 (2021)","journal-title":"IEEE Access"},{"key":"1667_CR12","unstructured":"Alexey Bochkovskiy, Chien-Yao Wang, and Hong-Yuan Mark Liao. Yolov4: Optimal speed and accuracy of object detection. arXiv preprint arX-iv:2004.10934, 2020. 2, 3"},{"key":"1667_CR13","doi-asserted-by":"crossref","unstructured":"T. Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar. Focal loss for dense object detection. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2980\u20132988, 2017. 2,3","DOI":"10.1109\/ICCV.2017.324"},{"issue":"8","key":"1667_CR14","first-page":"2345","volume":"13","author":"Y Liu","year":"2024","unstructured":"Liu, Y., Wang, M.: Research on lightweight al-gorithm for gangue detection based on improved yo-lov5. Scientific. Rep. 13(8), 2345\u20132356 (2024)","journal-title":"Scientific. Rep."},{"issue":"5","key":"1667_CR15","first-page":"567","volume":"18","author":"W Zhang","year":"2024","unstructured":"Zhang, W., Chen, Li.: The real-time detection method for coal gangue based on yolov8s-gsc. J. Real-Time. Image. Processing. 18(5), 567\u2013576 (2024)","journal-title":"J. Real-Time. Image. Processing."},{"issue":"8","key":"1667_CR16","first-page":"4499","volume":"34","author":"J Zhang","year":"2021","unstructured":"Zhang, J., et al.: Bagfn: Broad attention graph fusion network for high-order feature interactions. IEEE. Transact. Neural. Networks. Learning. Sy-st. 34(8), 4499\u20134513 (2021)","journal-title":"IEEE. Transact. Neural. Networks. Learning. Sy-st."},{"key":"1667_CR17","first-page":"213","volume-title":"European Conferenceon Computer Vision","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Za-goruyko, S.: End-to- end object detection with transf-ormers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) European Conferenceon Computer Vision, pp. 213\u2013229. Springer, Cham (2020)"},{"key":"1667_CR18","first-page":"2","volume":"30","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In Proceed. Ad-vances Neural Inform. Processing Syst. (NeurIPS) 30, 2 (2017)","journal-title":"In Proceed. Ad-vances Neural Inform. Processing Syst. (NeurIPS)"},{"key":"1667_CR19","doi-asserted-by":"crossref","unstructured":"Z. Liu, Y. Lin Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, and B. Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proc-eedings of the IEEE International Conference on Co-mputer Vision (ICCV), pages 10012\u201310022, 2021. 2,4","DOI":"10.1109\/ICCV48922.2021.00986"},{"issue":"1","key":"1667_CR20","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.vrih.2022.07.006","volume":"5","author":"M Zhang","year":"2023","unstructured":"Zhang, M., Tian, X.: Transformer architecture based on mutual attention for imageanomaly dete-ction. Virtual. Reality. Intell. Hardware. 5(1), 57\u201367 (2023)","journal-title":"Virtual. Reality. Intell. Hardware."},{"issue":"1","key":"1667_CR21","first-page":"1038","volume":"28","author":"Y Chen","year":"2022","unstructured":"Chen, Y., et al.: Ilidviz: An incremental learning-based visual analysis system for network anomaly detection. IEEE. Transact. Visualization. Comput. Graphics. 28(1), 1038\u20131048 (2022)","journal-title":"IEEE. Transact. Visualization. Comput. Graphics."},{"key":"1667_CR22","unstructured":"HaodongOuyang. Deyo: Detr with yolo for end-to-end object detection. arXiv preprint arXiv:2402.16370 (2024)"},{"key":"1667_CR23","doi-asserted-by":"crossref","unstructured":"Wei Liu, Dragomir Anguelov, Dumitru Erhan, Ch-ristian Szegedy, Scott Reed, Cheng-Yang Fu, and Ale-xander C Berg. Ssd: Single shot multibox detector. In Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11\u201314, 2016, Proceedings, Part I 14, pages 21\u201337. Springer, (2016)","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"1667_CR24","doi-asserted-by":"crossref","unstructured":"Joseph Redmon and Ali Farhadi. Yolo9000: better, faster, stronger. In Proceedings of the IEEE con-ference on computer vision and pattern recognition, pages 7263\u20137271 (2017)","DOI":"10.1109\/CVPR.2017.690"},{"key":"1667_CR25","unstructured":"Joseph Redmon. Yolov3: An incremental improve-ment. arXiv preprint arXiv:1804.02767 (2018)"},{"key":"1667_CR26","unstructured":"Glenn Jocher. Yolov5. https:\/\/github.com\/ ultralytics\/-yolov5, 2020. Accessed 2024\u201309\u201306.3"},{"key":"1667_CR27","unstructured":"Chien-Yao Wang, I-Hau Yeh, and Hong-Yuan Mark Liao. You only learn one representation: Unified ne-twork for multipletasks. arXiv preprint arXiv:2105.04206 (2021)"},{"key":"1667_CR28","doi-asserted-by":"crossref","unstructured":"Chien-Yao Wang, Alexey Bochkovskiy, and Hong-Yuan Mark Liao. Yolov7: Trainable bag-of-free-bies sets new state-of-the-art for real-time object detectors. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pages 7464\u20137475 (2023)","DOI":"10.1109\/CVPR52729.2023.00721"},{"key":"1667_CR29","unstructured":"Z Ge. Yolox: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430, (2021)"},{"key":"1667_CR30","unstructured":"Glenn Jocher.Yolov8. https:\/\/github.com\/ultralytics\/-yolov8, 2023. Accessed 2024\u201309\u201306. 3"},{"issue":"4","key":"1667_CR31","first-page":"6","volume":"3","author":"X Zhu","year":"2020","unstructured":"Zhu, X., Weijie, Su., Lewei, Lu., Li, B., Wang, X., Dai, J.: Deformable detr: Deformable transformers for end-to-end object detection. In. Int. Conf. Learning Representations 3(4), 6 (2020)","journal-title":"In. Int. Conf. Learning Representations"},{"key":"1667_CR32","unstructured":"Peize Sun, Jinkun Cao, Yi Jiang, Rufeng Zhang, Ping Luo, Jifeng Dai, and Xiaogang Li. Motr: End-to-end multiple- object tracking with transformer. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 10316\u201310325, (2021)"},{"key":"1667_CR33","doi-asserted-by":"crossref","unstructured":"Depu Meng, Xiaokang Chen, Zejia Fan Gang Zeng, Houqiang Li, Yuhui Yuan, and Lei Sun. Conditional detr for fast training convergence. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 3651\u2013 3660 (2021)","DOI":"10.1109\/ICCV48922.2021.00363"},{"key":"1667_CR34","doi-asserted-by":"crossref","unstructured":"Xinlong Wang Rufeng Li, Yue Zhang, Zhuang Liu, and Stephen Lin. Pnp-detr: Towards efficient visual an-alysis with transformers. arXiv preprint arXiv:2206.12071, (2022)","DOI":"10.1109\/ICCV48922.2021.00462"},{"key":"1667_CR35","doi-asserted-by":"crossref","unstructured":"Zhigang Dai, Bolun Cai, Yugeng Lin, and Junying Chen. Up-detr: Unsupervised pretraining for object detection with transformers. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 1601\u2013 1610 (2021)","DOI":"10.1109\/CVPR46437.2021.00165"},{"key":"1667_CR36","doi-asserted-by":"crossref","unstructured":"Yukun Wang, Hongwei Zhu, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. Max-deeplab: End-to-end panoptic segmentation with mask transformers. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 5463\u20135474 (2021)","DOI":"10.1109\/CVPR46437.2021.00542"},{"key":"1667_CR37","unstructured":"Bowen Cheng, Alexander Schwing, and Alexander Kirillov. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE\/CVF International Conference on Computer Vision, pages 1290\u20131299 (2021)"},{"key":"1667_CR38","unstructured":"Alexey Bochkovskiy, Chien-Yao Wang, and Hong- Yuan Mark Liao. Yolov4: Optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934, (2020)"},{"key":"1667_CR39","unstructured":"Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pages 9729\u20139738 (2020)"},{"key":"1667_CR40","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kole-snikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. International Conference on Learning Representations (2021)"},{"key":"1667_CR41","doi-asserted-by":"crossref","unstructured":"Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z. Li. Bridging the gap between anchor-based and anchor-free detection via adaptive training samplese-lection. Proceedings of the IEEE\/CVF Conference on Computer Vi- sion and Pattern Recognition, pages 9759\u20139768 (2020)","DOI":"10.1109\/CVPR42600.2020.00978"},{"key":"1667_CR42","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszko-reit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems, pages 5998\u20136008 (2017)"},{"key":"1667_CR43","unstructured":"Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. Proceedings of the International Conference on Machine Learning, pages 6105\u20136114 (2019)"},{"key":"1667_CR44","unstructured":"Tsung-YiLin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Doll\u00e1r. Focal oss for dense object detection. Proceedings of the IEEE International Conference on Computer Vision, pages 2980\u20132988 (2017)"},{"key":"1667_CR45","doi-asserted-by":"crossref","unstructured":"Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In Proceedings of the IEEE\/CVF international conf-erence on computer vision, pages 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"1667_CR46","first-page":"125","volume-title":"Mobilenetv3","author":"B Koonce","year":"2021","unstructured":"Koonce, B., Koonce, B.: Conv-olutional Neural Networks with Swift for Tensorflow: Im-age Recognition and Dataset Categorization. In: Koonce, B. (ed.) Mobilenetv3, pp. 125\u2013144. Apress, Berkeley (2021)"},{"key":"1667_CR47","doi-asserted-by":"crossref","unstructured":"Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chu-njing Xu, and Chang Xu. Ghostnet: More features from cheap operations. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pages 1580\u20131589 (2020)","DOI":"10.1109\/CVPR42600.2020.00165"},{"key":"1667_CR48","unstructured":"Mingxing Tan, Ruoming Pang, and Quoc V Le. Eff-icientdet: Scalable and efficient object detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pages 10781\u201310790 (2020)"},{"issue":"7","key":"1667_CR49","first-page":"1122","volume":"39","author":"Q Wang","year":"2023","unstructured":"Wang, Q., Li, J.: A fast-training gan for coal\u2013gangue image augmentation based on a few samples. The. Visual. Comput. 39(7), 1122\u20131133 (2023)","journal-title":"The. Visual. Comput."},{"issue":"111343","key":"1667_CR50","first-page":"7","volume":"285","author":"S Mardieva","year":"2024","unstructured":"Mardieva, S., Ahmad, S., Umirzakova, S., Rasool, M.A., Whangbo, T.K.: Lightweight image super-resolution for IoT devices using deep residual feature distillation network. Knowl.-Based Syst. 285(111343), 7 (2024)","journal-title":"Knowl.-Based Syst."}],"container-title":["Journal of Real-Time Image Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11554-025-01667-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11554-025-01667-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11554-025-01667-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,3]],"date-time":"2025-05-03T06:23:10Z","timestamp":1746253390000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11554-025-01667-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":50,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["1667"],"URL":"https:\/\/doi.org\/10.1007\/s11554-025-01667-1","relation":{},"ISSN":["1861-8200","1861-8219"],"issn-type":[{"value":"1861-8200","type":"print"},{"value":"1861-8219","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]},"assertion":[{"value":"24 December 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 March 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 April 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"91"}}