{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T08:19:42Z","timestamp":1770970782407,"version":"3.50.1"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"37","license":[{"start":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T00:00:00Z","timestamp":1713744000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T00:00:00Z","timestamp":1713744000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51804249"],"award-info":[{"award-number":["51804249"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shaanxi Province Qin Chuang yuan \u201cScientists+Engineers\u201d Team Construction","award":["2022KXJ-38"],"award-info":[{"award-number":["2022KXJ-38"]}]},{"DOI":"10.13039\/501100017596","name":"Natural Science Basic Research Program of Shaanxi Province","doi-asserted-by":"publisher","award":["2021JQ-574"],"award-info":[{"award-number":["2021JQ-574"]}],"id":[{"id":"10.13039\/501100017596","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-024-19164-1","type":"journal-article","created":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T05:01:48Z","timestamp":1713762108000},"page":"84523-84538","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Dual-branch deep learning architecture enabling miner behavior 
recognition"],"prefix":"10.1007","volume":"83","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9052-0015","authenticated-orcid":false,"given":"Zheng","family":"Wang","sequence":"first","affiliation":[]},{"given":"Yan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Siyuan","family":"Duan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,22]]},"reference":[{"issue":"01","key":"19164_CR1","first-page":"1","volume":"44","author":"W Hu","year":"2023","unstructured":"Hu W, Zhai Y, Yun R (2023) Overview and prospect of visual detection methods for underground unsafe behaviors in China. Colliery Mech Electr Technol 44(01):1\u20137","journal-title":"Colliery Mech Electr Technol"},{"issue":"6","key":"19164_CR2","doi-asserted-by":"publisher","first-page":"973","DOI":"10.26599\/TST.2021.9010068","volume":"27","author":"N Ma","year":"2022","unstructured":"Ma N, Wu Z, Cheung Y-M, Guo Y, Gao Y, Li J-H, Jiang B-Y (2022) A survey of human action recognition and posture prediction. Tsinghua Sci Technol 27(6):973\u20131001","journal-title":"Tsinghua Sci Technol"},{"key":"19164_CR3","first-page":"84","volume":"09","author":"P Lyu","year":"2018","unstructured":"Lyu P, He M, Chen X, Bao Y (2018) Development and prospect of wisdom mine. Ind Mine Autom 09:84\u201388","journal-title":"Ind Mine Autom"},{"key":"19164_CR4","doi-asserted-by":"publisher","first-page":"846","DOI":"10.1109\/TMM.2021.3060280","volume":"24","author":"A Zhao","year":"2021","unstructured":"Zhao A, Dong J, Li J, Qi L, Zhou H (2021) Associated spatio-temporal capsule network for gait recognition. 
IEEE Trans Multimed 24:846\u2013860","journal-title":"IEEE Trans Multimed"},{"issue":"7","key":"19164_CR5","doi-asserted-by":"publisher","first-page":"4584","DOI":"10.1109\/TII.2020.3018487","volume":"17","author":"S Jiang","year":"2020","unstructured":"Jiang S, Qi Y, Zhang H, Bai Z, Lu X, Wang P (2020) D3d: dual 3-d convolutional network for real-time action recognition. IEEE Trans Industr Inf 17(7):4584\u20134593","journal-title":"IEEE Trans Industr Inf"},{"issue":"5","key":"19164_CR6","doi-asserted-by":"publisher","first-page":"14885","DOI":"10.1007\/s11042-020-08806-9","volume":"83","author":"MA Khan","year":"2024","unstructured":"Khan MA, Javed K, Khan SA, Saba T, Habib U, Khan JA, Abbasi AA (2024) Human action recognition using fusion of multiview and deep features: an application to video surveillance. Multimed Tools Appl 83(5):14885\u201314911","journal-title":"Multimed Tools Appl"},{"key":"19164_CR7","first-page":"20","volume":"02","author":"B Liu","year":"2023","unstructured":"Liu B, Jia H, Yang Y, Shen J, Gai M, Song T (2023) Research on miners\u2019dangerous behavior recognition based on improved OpenPose algorithm. Ideo Eng 02:20\u201323","journal-title":"Ideo Eng"},{"key":"19164_CR8","first-page":"118","volume":"10","author":"X Luo","year":"2020","unstructured":"Luo X, Yuan Y, Wang D, Zhong S, Zhang B, Li Q (2020) Research on continuous learning model of complex behavior recognition in coal mine video. Metal Mine 10:118\u2013123","journal-title":"Metal Mine"},{"issue":"03","key":"19164_CR9","first-page":"41","volume":"30","author":"T Wen","year":"2020","unstructured":"Wen T, Wang G, Kong X, Liu M, Bo J (2020) Identification of miners\u2019 unsafe behaviors based on transfer learning and residual network. 
China Saf Sci J 30(03):41\u201346","journal-title":"China Saf Sci J"},{"issue":"04","key":"19164_CR10","first-page":"75","volume":"46","author":"W Dang","year":"2020","unstructured":"Dang W, Zhang Z, Bai S, Gong D, Wu Z (2020) Inspection behavior recognition of underground power distribution room based on improved two-stream CNN method. Ind Mine Autom 46(04):75\u201380","journal-title":"Ind Mine Autom"},{"issue":"04","key":"19164_CR11","first-page":"62","volume":"47","author":"H Huang","year":"2021","unstructured":"Huang H, Cheng X, Yun X, Zhou Y, Sun Y (2021) DA-GCN-based coal mine personnel action recognition method. Ind Mine Autom 47(04):62\u201366","journal-title":"Ind Mine Autom"},{"key":"19164_CR12","doi-asserted-by":"publisher","first-page":"3101","DOI":"10.1109\/TMM.2022.3155927","volume":"25","author":"X Zhao","year":"2023","unstructured":"Zhao X, Wu X, Miao J, Chen W, Chen PC, Li Z (2023) Alike: accurate and lightweight keypoint detection and descriptor extraction. IEEE Trans Multimed 25:3101\u20133112","journal-title":"IEEE Trans Multimed"},{"key":"19164_CR13","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2021.3091511","volume":"70","author":"A Dairi","year":"2021","unstructured":"Dairi A, Harrou F, Khadraoui S, Sun Y (2021) Integrated multiple directed attention-based deep learning for improved air pollution forecasting. IEEE Trans Instrum Meas 70:1\u201315","journal-title":"IEEE Trans Instrum Meas"},{"issue":"26","key":"19164_CR14","doi-asserted-by":"publisher","first-page":"40761","DOI":"10.1007\/s11042-023-15168-5","volume":"82","author":"F Gu","year":"2023","unstructured":"Gu F, Lu J, Cai C (2023) A robust attention-enhanced network with transformer for visual tracking. 
Multimed Tools Appl 82(26):40761\u201340782","journal-title":"Multimed Tools Appl"},{"key":"19164_CR15","doi-asserted-by":"crossref","unstructured":"Graham B, El-Nouby A, Touvron H, Stock P, Joulin A, J\u00e9gou H, Douze M (2021) Levit: a vision transformer in convnet\u2019s clothing for faster inference. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 12259\u201312269","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"19164_CR16","doi-asserted-by":"crossref","unstructured":"Yuan K, Guo S, Liu Z, Zhou A, Yu F, Wu W (2021) Incorporating convolution designs into visual transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 579\u2013588","DOI":"10.1109\/ICCV48922.2021.00062"},{"key":"19164_CR17","doi-asserted-by":"crossref","unstructured":"Girshick R (2015) Fast r-cnn. Proceedings of the IEEE international conference on computer vision, pp 1440\u20131448\u00a0","DOI":"10.1109\/ICCV.2015.169"},{"key":"19164_CR18","doi-asserted-by":"crossref","unstructured":"He K, Gkioxari G, Doll\u00e1r P, Girshick R (2017) Mask r-cnn. In: Proceedings of the IEEE International Conference on Computer Vision, pp 2961\u20132969","DOI":"10.1109\/ICCV.2017.322"},{"issue":"8","key":"19164_CR19","doi-asserted-by":"publisher","first-page":"3703","DOI":"10.1109\/TIP.2019.2901707","volume":"28","author":"M Lu","year":"2019","unstructured":"Lu M, Li. N Z, Wang Y, Pan G (2019) Deep attention network for egocentric action recognition. IEEE Trans Image Process 28(8):3703\u20133713","journal-title":"IEEE Trans Image Process"},{"key":"19164_CR20","first-page":"1","volume":"71","author":"X Wang","year":"2021","unstructured":"Wang X, Zhang L, Huang W, Wang S, Wu H, He J, Song A (2021) Deep convolutional networks with tunable speed\u2013accuracy tradeoff for human activity recognition using wearables. 
IEEE Trans Instrum Meas 71:1\u201312","journal-title":"IEEE Trans Instrum Meas"},{"key":"19164_CR21","first-page":"1","volume":"70","author":"W Gao","year":"2021","unstructured":"Gao W, Zhang L, Huang W, Min F, He J, Song A (2021) Deep neural networks for sensor-based human activity recognition using selective kernel convolution. IEEE Trans Instrum Meas 70:1\u201313","journal-title":"IEEE Trans Instrum Meas"},{"issue":"7","key":"19164_CR22","doi-asserted-by":"publisher","first-page":"3992","DOI":"10.1109\/TIM.2019.2945467","volume":"69","author":"Z Chen","year":"2019","unstructured":"Chen Z, Jiang C, Xiang S, Ding J, Wu M, Li X (2019) Smartphone sensor-based human activity recognition using feature fusion and maximum full posteriori. IEEE Trans Instrum Meas 69(7):3992\u20134001","journal-title":"IEEE Trans Instrum Meas"},{"issue":"1","key":"19164_CR23","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1109\/TIP.2018.2865280","volume":"28","author":"Y Zhu","year":"2018","unstructured":"Zhu Y, Zhao C, Guo H, Wang J, Zhao X, Lu H (2018) Attention CoupleNet: fully convolutional attention coupling network for object detection. IEEE Trans Image Process 28(1):113\u2013126","journal-title":"IEEE Trans Image Process"},{"key":"19164_CR24","doi-asserted-by":"publisher","first-page":"5595","DOI":"10.1007\/s11042-019-08422-2","volume":"79","author":"H Ling","year":"2020","unstructured":"Ling H, Wu J, Huang J, Chen J, Li P (2020) Attention-based convolutional neural network for deep face recognition. Multimed Tools Appl 79:5595\u20135616","journal-title":"Multimed Tools Appl"},{"key":"19164_CR25","doi-asserted-by":"publisher","first-page":"2608","DOI":"10.1109\/TMM.2023.3301225","volume":"26","author":"J Shi","year":"2024","unstructured":"Shi J, Wang Y, Yu Z, Li G, Hong X, Wang F, Gong Y (2024) Exploiting multi-scale parallel self-attention and local variation via dual-branch Transformer-CNN structure for face super-resolution. 
IEEE Trans Multimed 26:2608\u20132620","journal-title":"IEEE Trans Multimed"},{"key":"19164_CR26","doi-asserted-by":"publisher","first-page":"2621","DOI":"10.1109\/TMM.2023.3301238","volume":"26","author":"F Zhang","year":"2024","unstructured":"Zhang F, Liu N, Duan F (2024) Coarse-to-fine depth super-resolution with adaptive RGB-D feature attention. IEEE Trans Multimed 26:2621\u20132633","journal-title":"IEEE Trans Multimed"},{"key":"19164_CR27","doi-asserted-by":"crossref","unstructured":"Ramesh M, Mahesh K (2019) Sports video classification with deep convolution neural network: a test on UCF101 dataset. Int J Eng Adv Technol 8(4S2):2249\u20138958","DOI":"10.35940\/ijeat.D1007.0484S219"},{"key":"19164_CR28","doi-asserted-by":"crossref","unstructured":"Kuehne H, Jhuang H, Garrote E, Poggio T, Serre T (2011) HMDB: a large video database for human motion recognition. In: 2011 International Conference on Computer Vision, pp 2556\u20132563","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"19164_CR29","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2022.104378","volume":"119","author":"Y Zhou","year":"2022","unstructured":"Zhou Y, Song Y, Chen L, Chen Y, Ben X, Cao Y (2022) A novel micro-expression detection algorithm based on BERT and 3DCNN. Image Vis Comput 119:104378","journal-title":"Image Vis Comput"},{"key":"19164_CR30","doi-asserted-by":"publisher","first-page":"605","DOI":"10.1016\/j.jmsy.2020.04.007","volume":"56","author":"Q Xiong","year":"2020","unstructured":"Xiong Q, Zhang J, Wang P, Liu D, Gao R-X (2020) Transferable two-stream convolutional neural network for human action recognition. 
J Manuf Syst 56:605\u2013614","journal-title":"J Manuf Syst"},{"issue":"6","key":"19164_CR31","doi-asserted-by":"publisher","first-page":"7047","DOI":"10.1007\/s12652-021-03558-2","volume":"14","author":"T Kujani","year":"2023","unstructured":"Kujani T, Kumar VD (2023) Head movements for behavior recognition from real time video based on deep learning ConvNet transfer learning. J Ambient Intell Humaniz Comput 14(6):7047\u20137061","journal-title":"J Ambient Intell Humaniz Comput"},{"key":"19164_CR32","doi-asserted-by":"crossref","unstructured":"Arnab A, Dehghani M, Heigold G, Sun C, Lu\u010di\u0107 M, Schmid C (2021) Vivit: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp 6836\u20136846","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"19164_CR33","doi-asserted-by":"crossref","unstructured":"Duan H, Zhao Y, Xiong Y, Liu W, Lin D (2020) Omni-sourced webly-supervised learning for video recognition. In: European Conference on Computer Vision, pp 670\u2013688","DOI":"10.1007\/978-3-030-58555-6_40"},{"key":"19164_CR34","doi-asserted-by":"crossref","unstructured":"Selvaraju R-R, Cogswell M, Das A, Vedantam R, Parikh D, Batra D (2017) Grad-cam: visual explanations from deep networks via gradient-based localization. 
In: Proceedings of the IEEE International Conference on Computer Vision, pp 618\u2013626","DOI":"10.1109\/ICCV.2017.74"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19164-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-024-19164-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-19164-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T08:08:49Z","timestamp":1731658129000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-024-19164-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,22]]},"references-count":34,"journal-issue":{"issue":"37","published-online":{"date-parts":[[2024,11]]}},"alternative-id":["19164"],"URL":"https:\/\/doi.org\/10.1007\/s11042-024-19164-1","relation":{},"ISSN":["1573-7721"],"issn-type":[{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,22]]},"assertion":[{"value":"10 September 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 January 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 April 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 April 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}