{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T15:24:30Z","timestamp":1775661870254,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2022,1,5]],"date-time":"2022-01-05T00:00:00Z","timestamp":1641340800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,5]],"date-time":"2022-01-05T00:00:00Z","timestamp":1641340800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"national natural science foundation of china","doi-asserted-by":"publisher","award":["52075027"],"award-info":[{"award-number":["52075027"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"fundamental research funds for the central universities","doi-asserted-by":"publisher","award":["2020XJJD03"],"award-info":[{"award-number":["2020XJJD03"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. & Cyber."],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1007\/s13042-021-01488-1","type":"journal-article","created":{"date-parts":[[2022,1,5]],"date-time":"2022-01-05T16:06:23Z","timestamp":1641398783000},"page":"1781-1794","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":19,"title":["Fault detection of train mechanical parts using multi-mode aggregation feature enhanced convolution neural network"],"prefix":"10.1007","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8202-0571","authenticated-orcid":false,"given":"Ye","family":"Tao","sequence":"first","affiliation":[]},{"given":"Zhang","family":"Jun","sequence":"additional","affiliation":[]},{"given":"Zhang","family":"Zhi-hao","sequence":"additional","affiliation":[]},{"given":"Zhang","family":"Yi","sequence":"additional","affiliation":[]},{"given":"Zhou","family":"Fu-qiang","sequence":"additional","affiliation":[]},{"given":"Gao","family":"Xiao-zhi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,1,5]]},"reference":[{"key":"1488_CR1","first-page":"26","volume":"5","author":"R Liu","year":"2005","unstructured":"Liu R (2005) Principle and application of TFDS. Chinese Railways 5:26\u201327 ((in Chinese))","journal-title":"Chinese Railways"},{"key":"1488_CR2","doi-asserted-by":"crossref","unstructured":"Kong R, Sun F, A. Yao, H. Liu, M. Lu, Y. Chen (2017) \u201cRon: Reverse connection with objectness prior networks for object detection,\u201d in Proc. Comput. Vis. Pattern Recognit. (CVPR), 5936\u20135944","DOI":"10.1109\/CVPR.2017.557"},{"key":"1488_CR3","doi-asserted-by":"crossref","unstructured":"S. Zhang, L. Wen, X. Bian, Z. Lei, S. Z. Li, (2018) Single-Shot Refinement Neural Network for Object Detection,\" Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 4203\u20134212","DOI":"10.1109\/CVPR.2018.00442"},{"key":"1488_CR4","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, J. Sun (2016) Deep Residual Learning for Image Recognition, Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"1488_CR5","doi-asserted-by":"crossref","unstructured":"N. Ma, X. Zhang, H. Zheng, and J. Sun, (2018) ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design, Proc. Springer Euro. Conf. Comput. Vis. (ECCV), pp. 122\u2013138","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"1488_CR6","unstructured":"M. Tan and Q. V. Le (2019) EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks, Proc. Int. Conf. Machine Learning, pp. 6105\u20136114"},{"issue":"13","key":"1488_CR7","doi-asserted-by":"publisher","first-page":"7181","DOI":"10.1109\/JSEN.2020.2977366","volume":"20","author":"L Xiao","year":"2020","unstructured":"Xiao L, Wu B, Hu Y (2020) Surface defect detection using image pyramid. IEEE Sens J 20(13):7181\u20137188","journal-title":"IEEE Sens J"},{"issue":"8","key":"1488_CR8","doi-asserted-by":"publisher","first-page":"2849","DOI":"10.1109\/TIM.2018.2871353","volume":"68","author":"J Zhong","year":"2019","unstructured":"Zhong J, Liu Z, Han Z, Han Y, Zhang W (2019) A CNN-based defect inspection method for catenary split pins in high-speed railway. IEEE Trans Instrum Meas 68(8):2849\u20132860","journal-title":"IEEE Trans Instrum Meas"},{"key":"1488_CR9","doi-asserted-by":"crossref","unstructured":"K. He, G. Gkioxari, P. Dollar and R. Girshick (2017) Mask R-CNN,\" Proc. IEEE Int. Conf. Comput. Vis. (ICCV), pp. 2980\u20132988","DOI":"10.1109\/ICCV.2017.322"},{"key":"1488_CR10","unstructured":"L. Chen, G. Papandreou, F. Schroff, H. J. a. C. V. Adam, and P. Recognition, \"Rethinking Atrous Convolution for Semantic Image Segmentation\", arXiv: 1706.05587, 2017. [Online]. Available: https:\/\/arxiv.org\/abs\/1706.05587."},{"key":"1488_CR11","doi-asserted-by":"crossref","unstructured":"S. Mehta, M. Rastegari, A. Caspi, L. G. Shapiro, H. Hajishirzi (2018) ESPNet Efficient Spatial Pyramid of Dilated Convolutions for Semantic Segmentation,\" Proc. Springer Euro. Conf. Comput. Vis. (ECCV), Springer, Cham 561\u2013580","DOI":"10.1007\/978-3-030-01249-6_34"},{"key":"1488_CR12","doi-asserted-by":"crossref","unstructured":"R. Girshick, J. Donahue, T. Darrell, and J. Malik (2014) Rich Feature Hierarchies for Accurate Object Detection and Semantic Segmentation Proc. IEEE Comput Vis Pattern Recognit (CVPR), pp. 580\u2013587","DOI":"10.1109\/CVPR.2014.81"},{"key":"1488_CR13","unstructured":"S. Ren, K. He, R. Girshick, and J. Sun (2015) Faster R-CNN: towards real-time object detection with region proposal networks, Proc. Neural Inf. Process. Syst. (NIPS), pp. 91\u201399"},{"key":"1488_CR14","doi-asserted-by":"crossref","unstructured":"T. Lin, P. Dollar, R. Girshick, K. He, B. Hariharan, S. Belongie (2017) Feature Pyramid Networks for Object Detection Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 936\u2013944","DOI":"10.1109\/CVPR.2017.106"},{"key":"1488_CR15","doi-asserted-by":"crossref","unstructured":"W. Liu et al., (2016) SSD: Single shot multibox detector, Proc. Springer Euro. Conf. Comput. Vis. (ECCV), pp. 21\u201337","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"1488_CR16","doi-asserted-by":"crossref","unstructured":"J. Redmon, S. K. Divvala, R. Girshick, A. Farhadi, (2016) You Only Look Once: Unified, Real-Time Object Detection,\" Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 779\u2013788,","DOI":"10.1109\/CVPR.2016.91"},{"key":"1488_CR17","doi-asserted-by":"crossref","unstructured":"T. Lin, P. Goyal, R. Girshick, K. He, P. Dollar (2017) Focal Loss for Dense Object Detection Proc. Springer Euro. Conf. Comput. Vis. (ECCV), pp. 2999\u20133007, pp. 318\u2013327","DOI":"10.1109\/TPAMI.2018.2858826"},{"key":"1488_CR18","unstructured":"J. Redmon A. Farhadi, \"YOLOv3: An incremental improvement\", 2018, [online] Available: https:\/\/arxiv.org\/abs\/1804.02767."},{"issue":"7","key":"1488_CR19","doi-asserted-by":"publisher","first-page":"794","DOI":"10.1177\/0954409713495532","volume":"228","author":"F Zhou","year":"2014","unstructured":"Zhou F, Zou R, Qiu Y, Gao H (2014) Automated visual inspection of angle cocks during train operation. Proc Inst Mech Eng Part F-J Rail Rapid Transit 228(7):794\u2013806","journal-title":"Proc Inst Mech Eng Part F-J Rail Rapid Transit"},{"issue":"7","key":"1488_CR20","first-page":"1629","volume":"230","author":"L Liu","year":"2015","unstructured":"Liu L, Zhou F, He Y (2015) Automated status inspection of fastening bolts on freight trains using a machine vision approach. IEEE Trans Instrum Meas 230(7):1629\u20131641","journal-title":"IEEE Trans Instrum Meas"},{"key":"1488_CR21","doi-asserted-by":"crossref","unstructured":"G. Nan and J. E. Yao (2015) A real-time visual inspection method of fastening bolts in train operation,\" Proc. Aopc 2015: Image Processing and Analysis 9675","DOI":"10.1117\/12.2202348"},{"key":"1488_CR22","doi-asserted-by":"crossref","unstructured":"A. James, W. Jie, Y. Xulei, Y. Chenghao, Z. Zeng (2018) TrackNet - A Deep-learning Based Fault Detection for Railway Track Inspection, Proc. 2018 Int. Conf. Intell. Rail Transportation (ICIRT)","DOI":"10.1109\/ICIRT.2018.8641608"},{"key":"1488_CR23","doi-asserted-by":"crossref","unstructured":"R. Singh Pahwa, J. Chao, J. paul (2019) FaultNet: Faulty Rail-Valves Detection using Deep-learning and Computer Vision,\" Proc. Inst. Mech. Eng. Part F-J. Rail Rapid Transit (IEEE-ITSC), pp. 59\u2013566","DOI":"10.1109\/ITSC.2019.8917062"},{"issue":"3","key":"1488_CR24","doi-asserted-by":"publisher","first-page":"298","DOI":"10.1177\/0954409718793464","volume":"233","author":"F Zhou","year":"2018","unstructured":"Zhou F, Li J, Li X, Li Z, Cao Y (2018) Train target detection in a complex background based on convolutional neural networks. Proc Inst Mech Eng Part F-J Rail Rapid Transit 233(3):298\u2013311","journal-title":"Proc Inst Mech Eng Part F-J Rail Rapid Transit"},{"key":"1488_CR25","first-page":"5998","volume":"30","author":"A Vaswani","year":"2017","unstructured":"Vaswani A, Shazeer N, Parmar N (2017) Attention is all you need. Proc Neural Inf Process Syst (NIPS) 30:5998\u20136008","journal-title":"Proc Neural Inf Process Syst (NIPS)"},{"key":"1488_CR26","doi-asserted-by":"crossref","unstructured":"X. Wang, R. Girshick, A. Gupta, K. He (2018) Non-local Neural Networks,\" Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 7794\u20137803","DOI":"10.1109\/CVPR.2018.00813"},{"key":"1488_CR27","doi-asserted-by":"publisher","first-page":"1028","DOI":"10.1016\/j.cviu.2019.102827","volume":"189","author":"J Yi","year":"2019","unstructured":"Yi J, Wu P, Metaxas D, Understanding I (2019) ASSD: Attentive single shot multibox detector. Comput Vis Image Underst 189:1028","journal-title":"Comput Vis Image Underst"},{"key":"1488_CR28","doi-asserted-by":"crossref","unstructured":"Chang C Y, Chang S E, Hsiao P Y, et al. (2020) EPSNet: Efficient Panoptic Segmentation Network with Cross-layer Attention Fusion[C]\/\/Proceedings of the Asian Conference on Computer Vision","DOI":"10.1007\/978-3-030-69525-5_41"},{"key":"1488_CR29","doi-asserted-by":"crossref","unstructured":"J. Yu, Y. Jiang, Z. Wang, Z. Cao, T. S. Huang (2016) UnitBox: An Advanced Object Detection Network,\" Proc. acm multimedia, pp. 516\u2013520","DOI":"10.1145\/2964284.2967274"},{"key":"1488_CR30","doi-asserted-by":"crossref","unstructured":"H. Rezatofighi, N. Tsoi, J. Gwak, A. Sadeghian, I. Reid, S. Savarese (2019) Generalized Intersection Over Union: A Metric and a Loss for Bounding Box Regression,\" Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 658\u2013666","DOI":"10.1109\/CVPR.2019.00075"},{"key":"1488_CR31","first-page":"12993","volume":"34","author":"Z Zheng","year":"2020","unstructured":"Zheng Z, Wang P, Liu W, Li J, Ye R, Ren D (2020) Distance-IoU Loss: Faster and Better learning for bounding box regression,\" in national conference on artificial intelligence. Proc AAAI Conf Artif Intell 34:12993\u201313000","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"1488_CR32","unstructured":"K. Simonyan and A. Zisserman (2014) Very deep convolutional networks for large-scale image recognition,\u201d arXiv: 1409.1556, [Online]. Available: https:\/\/arxiv.org\/abs\/1409.1556."},{"key":"1488_CR33","doi-asserted-by":"crossref","unstructured":"C. Szegedy et al. (2015) Going deeper with convolutions, Proc. IEEE Comput. Vis. Pattern Recognit. (CVPR), pp. 1\u20139","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"1488_CR34","doi-asserted-by":"crossref","unstructured":"S. Liu, D. Huang, Y. Wang (2018) Receptive field block net for accurate and fast object detection, Proc. Springer Euro. Conf. Comput. Vis. (ECCV), Cham 404\u2013419","DOI":"10.1007\/978-3-030-01252-6_24"},{"issue":"8","key":"1488_CR35","doi-asserted-by":"publisher","first-page":"2011","DOI":"10.1109\/TPAMI.2019.2913372","volume":"42","author":"J Hu","year":"2020","unstructured":"Hu J, Shen L, Albanie S, Sun G, Wu E (2020) Squeeze-and-excitation networks. IEEE Trans Pattern Anal Mach Intell 42(8):2011\u20132023","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1488_CR36","unstructured":"Z. Li, and F. Zhou, \"FSSD: Feature Fusion Single Shot Multibox Detector,\" arXiv: 1712.00960, 2017, [Online]. Available: https:\/\/arxiv.org\/abs\/1712.00960."},{"key":"1488_CR37","doi-asserted-by":"crossref","unstructured":"Bolei Zhou, Aditya Khosla, Agata Lapedriza Aude Oliva, and Antonio Torralba (2016) \u201cLearning deep features for discriminative localization\u201d. Proc. IEEE CVPR, pp. 2921\u20132929","DOI":"10.1109\/CVPR.2016.319"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-021-01488-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-021-01488-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-021-01488-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,15]],"date-time":"2024-09-15T19:04:51Z","timestamp":1726427091000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-021-01488-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,1,5]]},"references-count":37,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2022,6]]}},"alternative-id":["1488"],"URL":"https:\/\/doi.org\/10.1007\/s13042-021-01488-1","relation":{},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"value":"1868-8071","type":"print"},{"value":"1868-808X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,1,5]]},"assertion":[{"value":"7 July 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 December 2021","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 January 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have influenced the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}