{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,24]],"date-time":"2026-04-24T15:08:27Z","timestamp":1777043307074,"version":"3.51.4"},"reference-count":43,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2023,5,22]],"date-time":"2023-05-22T00:00:00Z","timestamp":1684713600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,5,22]],"date-time":"2023-05-22T00:00:00Z","timestamp":1684713600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["CCF Trans. Pervasive Comp. Interact."],"published-print":{"date-parts":[[2023,9]]},"DOI":"10.1007\/s42486-023-00132-x","type":"journal-article","created":{"date-parts":[[2023,5,22]],"date-time":"2023-05-22T09:02:49Z","timestamp":1684746169000},"page":"321-332","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Human activity recognition based on multi-modal fusion"],"prefix":"10.1007","volume":"5","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8593-1423","authenticated-orcid":false,"given":"Cheng","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Tianqi","family":"Zu","sequence":"additional","affiliation":[]},{"given":"Yibin","family":"Hou","sequence":"additional","affiliation":[]},{"given":"Jian","family":"He","sequence":"additional","affiliation":[]},{"given":"Shengqi","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Ruihai","family":"Dong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,5,22]]},"reference":[{"key":"132_CR1","doi-asserted-by":"crossref","unstructured":"Abebe, 
G., Cavallaro, A.: Inertial-vision: cross-domain knowledge transfer for wearable sensors. In: Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 1392\u20131400 (2017)","DOI":"10.1109\/ICCVW.2017.165"},{"issue":"2","key":"132_CR2","doi-asserted-by":"publisher","first-page":"255","DOI":"10.1007\/s00138-010-0298-4","volume":"23","author":"M Ahad","year":"2012","unstructured":"Ahad, M., Rahman, A., Tan, J., Kim, H., Ishikawa, S.: Motion history image: its variants and applications. Mach. Vis. Appl. 23(2), 255\u2013281 (2012)","journal-title":"Mach. Vis. Appl."},{"issue":"1\u20132","key":"132_CR3","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1177\/0020294018813692","volume":"52","author":"S Balli","year":"2019","unstructured":"Balli, S., Sa\u011fba\u015f, E.A., Peker, M.: Human activity recognition from smart watch sensor data using a hybrid of principal component analysis and random forest algorithm. Meas. Control 52(1\u20132), 37\u201345 (2019)","journal-title":"Meas. Control"},{"key":"132_CR4","doi-asserted-by":"crossref","unstructured":"Barros, P., Parisi, G.I., Jirak, D., Wermter, S.: Real-time gesture recognition using a humanoid robot with a deep neural architecture. In: 2014 IEEE-RAS International Conference on Humanoid Robots. IEEE, pp. 646\u2013651 (2014)","DOI":"10.1109\/HUMANOIDS.2014.7041431"},{"issue":"3","key":"132_CR5","doi-asserted-by":"publisher","first-page":"257","DOI":"10.1109\/34.910878","volume":"23","author":"AF Bobick","year":"2001","unstructured":"Bobick, A.F., Davis, J.W.: The recognition of human movement using temporal templates. IEEE Trans. Pattern Anal. Mach. Intell. 23(3), 257\u2013267 (2001)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"issue":"8","key":"132_CR6","doi-asserted-by":"publisher","first-page":"2350","DOI":"10.3390\/s20082350","volume":"20","author":"RF Brena","year":"2020","unstructured":"Brena, R.F., Aguileta, A.A., Trejo, L.A., Molino-Minero-Re, E., Mayora, O.: Choosing the best sensor fusion method: a machine-learning approach. Sensors 20(8), 2350 (2020)","journal-title":"Sensors"},{"issue":"3","key":"132_CR7","doi-asserted-by":"publisher","first-page":"4405","DOI":"10.1007\/s11042-015-3177-1","volume":"76","author":"C Chen","year":"2017","unstructured":"Chen, C., Jafari, R., Kehtarnavaz, N.: A survey of depth and inertial sensor fusion for human action recognition. Multim. Tools Appl. 76(3), 4405\u20134425 (2017)","journal-title":"Multim. Tools Appl."},{"key":"132_CR8","doi-asserted-by":"crossref","unstructured":"Cippitelli, E., Gasparrini, S., Gambi, E., Spinsante, S.: A human activity recognition system using skeleton data from rgbd sensors. Comput. Intell. Neurosci. 2016 (2016)","DOI":"10.1155\/2016\/4351435"},{"key":"132_CR9","first-page":"210816","volume":"8","author":"F Demrozi","year":"2020","unstructured":"Demrozi, F., Pravadelli, G., Bihorac, A., Rashidi, P.: Human activity recognition using inertial, physiological and environmental sensors: a comprehensive survey. IEEE Access 8, 210816\u2013210836 (2020)","journal-title":"IEEE Access"},{"key":"132_CR10","doi-asserted-by":"publisher","first-page":"238","DOI":"10.1016\/j.neucom.2020.03.038","volume":"400","author":"CC dos Santos","year":"2020","unstructured":"dos Santos, C.C., Samatelo, J.L.A., Vassallo, R.F.: Dynamic gesture recognition by using cnns and star rgb: a temporal information condensation. 
Neurocomputing 400, 238\u2013254 (2020)","journal-title":"Neurocomputing"},{"key":"132_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2019.103520","volume":"115","author":"R Espinosa","year":"2019","unstructured":"Espinosa, R., Ponce, H., Guti\u00e9rrez, S., Mart\u00ednez-Villase\u00f1or, L., Brieva, J., Moya-Albor, E.: A vision-based approach for fall detection using multiple cameras and convolutional neural networks: a case study using the up-fall detection dataset. Comput. Biol. Med. 115, 103520 (2019)","journal-title":"Comput. Biol. Med."},{"key":"132_CR12","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1933\u20131941 (2016)","DOI":"10.1109\/CVPR.2016.213"},{"key":"132_CR13","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C.: X3d: expanding architectures for efficient video recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 203\u2013213 (2020)","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"132_CR14","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.cviu.2015.02.008","volume":"134","author":"D Fortun","year":"2015","unstructured":"Fortun, D., Bouthemy, P., Kervrann, C.: Optical flow modeling and computation: A survey. Comput. Vis. Image Underst. 134, 1\u201321 (2015)","journal-title":"Comput. Vis. Image Underst."},{"key":"132_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2020.114226","volume":"168","author":"YM Galv\u00e3o","year":"2021","unstructured":"Galv\u00e3o, Y.M., Ferreira, J., Albuquerque, V.A., Barros, P., Fernandes, B.J.: A multimodal approach using deep learning for fall detection. Expert Syst. Appl. 168, 114226 (2021)","journal-title":"Expert Syst. 
Appl."},{"key":"132_CR16","doi-asserted-by":"crossref","unstructured":"Gjoreski, H., Stankoski, S., Kiprijanovska, I., Nikolovska, A., Mladenovska, N., Trajanoska, M., Velichkovska, B., Gjoreski, M., Lu\u0161trek, M., Gams, M.: Wearable sensors data-fusion and machine-learning method for fall detection and activity recognition. In: Challenges and Trends in Multimodal Fall Detection for Healthcare. Springer, pp. 81\u201396 (2020)","DOI":"10.1007\/978-3-030-38748-8_4"},{"key":"132_CR17","unstructured":"Han, J., Bhanu, B.: Human activity recognition in thermal infrared imagery. In: 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201905)-Workshops. IEEE, pp. 17 (2005)"},{"key":"132_CR18","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"13","key":"132_CR19","doi-asserted-by":"publisher","first-page":"5110","DOI":"10.1109\/JSEN.2019.2903482","volume":"19","author":"J He","year":"2019","unstructured":"He, J., Zhang, Z., Wang, X., Yang, S.: A low power fall sensing technology based on fd-cnn. IEEE Sens. J. 19(13), 5110\u20135118 (2019)","journal-title":"IEEE Sens. J."},{"key":"132_CR20","doi-asserted-by":"publisher","first-page":"248","DOI":"10.1016\/j.neucom.2019.07.103","volume":"390","author":"J He","year":"2020","unstructured":"He, J., Zhang, C., He, X., Dong, R.: Visual recognition of traffic police gestures with convolutional pose machine and handcrafted features. Neurocomputing 390, 248\u2013259 (2020)","journal-title":"Neurocomputing"},{"issue":"1\u20133","key":"132_CR21","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1016\/0004-3702(81)90024-2","volume":"17","author":"BK Horn","year":"1981","unstructured":"Horn, B.K., Schunck, B.G.: Determining optical flow. Artif. Intell. 
17(1\u20133), 185\u2013203 (1981)","journal-title":"Artif. Intell."},{"key":"132_CR22","doi-asserted-by":"crossref","unstructured":"Hwang, I., Cha, G., Oh, S.: Multi-modal human action recognition using deep neural networks fusing image and inertial sensor data. In: 2017 IEEE International Conference on Multisensor Fusion and Integration for Intelligent Systems (MFI). IEEE, pp. 278\u2013283 (2017)","DOI":"10.1109\/MFI.2017.8170441"},{"key":"132_CR23","doi-asserted-by":"crossref","unstructured":"Li, Z., Wu, H.: A survey of maneuvering target tracking using Kalman filter. In: 2015 4th International Conference on Mechatronics, Materials, Chemistry and Computer Engineering. Atlantis Press, pp. 542\u2013545 (2015)","DOI":"10.2991\/icmmcce-15.2015.109"},{"key":"132_CR24","unstructured":"Liu, W., Wen, Y., Yu, Z., Yang, M.: Large-margin softmax loss for convolutional neural networks. In: International Conference on Machine Learning. PMLR, pp. 507\u2013516 (2016)"},{"issue":"23","key":"132_CR25","doi-asserted-by":"publisher","first-page":"11403","DOI":"10.1109\/JSEN.2019.2934678","volume":"19","author":"Y Lu","year":"2019","unstructured":"Lu, Y., Velipasalar, S.: Autonomous human activity classification from wearable multi-modal sensors. IEEE Sens. J. 19(23), 11403\u201311412 (2019)","journal-title":"IEEE Sens. J."},{"key":"132_CR26","unstructured":"Lucas, B.D., Kanade, T. et\u00a0al.: An iterative image registration technique with an application to stereo vision. Vancouver 81 (1981)"},{"issue":"8","key":"132_CR27","doi-asserted-by":"publisher","first-page":"7432","DOI":"10.1109\/JIOT.2020.2984544","volume":"7","author":"F Luo","year":"2020","unstructured":"Luo, F., Poslad, S., Bodanese, E.: Temporal convolutional networks for multiperson activity recognition using a 2-d lidar. IEEE Internet Things J. 
7(8), 7432\u20137442 (2020)","journal-title":"IEEE Internet Things J."},{"key":"132_CR28","doi-asserted-by":"crossref","unstructured":"Luong, M.-T., Pham, H., Manning, C.D.: Effective approaches to attention-based neural machine translation. arXiv:1508.04025 (2015)","DOI":"10.18653\/v1\/D15-1166"},{"key":"132_CR29","doi-asserted-by":"crossref","unstructured":"Mallat, R., Bonnet, V., Khalil, M., Mohammed, S.: Toward an affordable multi-modal motion capture system framework for human kinematics and kinetics assessment. In: International Symposium on Wearable Robotics. Springer, pp. 65\u201369 (2018)","DOI":"10.1007\/978-3-030-01887-0_13"},{"issue":"9","key":"132_CR30","doi-asserted-by":"publisher","first-page":"2096","DOI":"10.3390\/s17092096","volume":"17","author":"A Mao","year":"2017","unstructured":"Mao, A., Ma, X., He, Y., Luo, J.: Highly portable, sensor-based system for human fall monitoring. Sensors 17(9), 2096 (2017)","journal-title":"Sensors"},{"issue":"9","key":"132_CR31","doi-asserted-by":"publisher","first-page":"1988","DOI":"10.3390\/s19091988","volume":"19","author":"L Mart\u00ednez-Villase\u00f1or","year":"2019","unstructured":"Mart\u00ednez-Villase\u00f1or, L., Ponce, H., Brieva, J., Moya-Albor, E., N\u00fa\u00f1ez-Mart\u00ednez, J., Pe\u00f1afort-Asturiano, C.: Up-fall detection dataset: a multimodal approach. Sensors 19(9), 1988 (2019)","journal-title":"Sensors"},{"key":"132_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.comnet.2021.108074","volume":"193","author":"A Ometov","year":"2021","unstructured":"Ometov, A., Shubina, V., Klus, L., Skibi\u0144ska, J., Saafi, S., Pascacio, P., Flueratoru, L., Gaibor, D.Q., Chukhno, N., Chukhno, O., et al.: A survey on wearable technology: history, state-of-the-art and current challenges. Comput. Netw. 193, 108074 (2021)","journal-title":"Comput. 
Netw."},{"key":"132_CR33","doi-asserted-by":"crossref","unstructured":"Ponce, H., Mart\u00ednez-Villase\u00f1or, L.: Approaching fall classification using the up-fall detection dataset: Analysis and results from an international competition. In: Challenges and Trends in Multimodal Fall Detection for Healthcare. Springer, pp. 121\u2013133 (2020)","DOI":"10.1007\/978-3-030-38748-8_6"},{"key":"132_CR34","unstructured":"Ravi, N., Dandekar, N., Mysore, P., Littman, M.L.: Activity recognition from accelerometer data. In: Aaai, vol.\u00a05, no. 2005. Pittsburgh, PA, pp. 1541\u20131546 (2005)"},{"issue":"4","key":"132_CR35","first-page":"114","volume":"6","author":"P Rivera","year":"2017","unstructured":"Rivera, P., Valarezo, E., Choi, M.-T., Kim, T.-S.: Recognition of human hand activities based on a single wrist imu using recurrent neural networks. Int. J. Pharma Med. Biol. Sci. 6(4), 114\u2013118 (2017)","journal-title":"Int. J. Pharma Med. Biol. Sci."},{"key":"132_CR36","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2020.102094","volume":"62","author":"A Salehzadeh","year":"2020","unstructured":"Salehzadeh, A., Calitz, A.P., Greyling, J.: Human activity recognition using deep electroencephalography learning. Biomed. Signal Process. Control 62, 102094 (2020)","journal-title":"Biomed. Signal Process. Control"},{"issue":"9","key":"132_CR37","doi-asserted-by":"publisher","first-page":"2892","DOI":"10.3390\/s18092892","volume":"18","author":"O Steven Eyobu","year":"2018","unstructured":"Steven Eyobu, O., Han, D.S.: Feature representation and data augmentation for human activity classification based on wearable imu sensor data using a deep lstm neural network. 
Sensors 18(9), 2892 (2018)","journal-title":"Sensors"},{"issue":"9","key":"132_CR38","doi-asserted-by":"publisher","first-page":"3071","DOI":"10.3390\/s21093071","volume":"21","author":"M Stoeve","year":"2021","unstructured":"Stoeve, M., Schuldhaus, D., Gamp, A., Zwick, C., Eskofier, B.M.: From the laboratory to the field: Imu-based shot and pass detection in football training and game scenarios using deep learning. Sensors 21(9), 3071 (2021)","journal-title":"Sensors"},{"key":"132_CR39","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"issue":"8","key":"132_CR40","doi-asserted-by":"publisher","first-page":"1897","DOI":"10.1007\/s11760-014-0677-9","volume":"9","author":"D-M Tsai","year":"2015","unstructured":"Tsai, D.-M., Chiu, W.-Y., Lee, M.-H.: Optical flow-motion history image (of-mhi) for action recognition. Signal Image Video Process. 9(8), 1897\u20131906 (2015)","journal-title":"Signal Image Video Process."},{"issue":"2","key":"132_CR41","doi-asserted-by":"publisher","first-page":"743","DOI":"10.1007\/s11036-019-01445-x","volume":"25","author":"S Wan","year":"2020","unstructured":"Wan, S., Qi, L., Xu, X., Tong, C., Gu, Z.: Deep learning models for real-time human activity recognition with smartphones. Mob. Netw. Appl. 25(2), 743\u2013755 (2020)","journal-title":"Mob. Netw. Appl."},{"issue":"5","key":"132_CR42","doi-asserted-by":"publisher","first-page":"155014771984935","DOI":"10.1177\/1550147719849357","volume":"15","author":"Y Zhu","year":"2019","unstructured":"Zhu, Y., Yu, J., Hu, F., Li, Z., Ling, Z.: Human activity recognition via smart-belt in wireless body area networks. Int. J. Distrib. Sens. Netw. 15(5), 1550147719849357 (2019)","journal-title":"Int. J. Distrib. Sens. 
Netw."},{"issue":"1","key":"132_CR43","doi-asserted-by":"publisher","first-page":"302","DOI":"10.3390\/s18010302","volume":"18","author":"T Zimmermann","year":"2018","unstructured":"Zimmermann, T., Taetz, B., Bleser, G.: Imu-to-segment assignment and orientation alignment for the lower body using deep learning. Sensors 18(1), 302 (2018)","journal-title":"Sensors"}],"container-title":["CCF Transactions on Pervasive Computing and Interaction"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42486-023-00132-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s42486-023-00132-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42486-023-00132-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,15]],"date-time":"2023-08-15T07:25:16Z","timestamp":1692084316000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s42486-023-00132-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5,22]]},"references-count":43,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2023,9]]}},"alternative-id":["132"],"URL":"https:\/\/doi.org\/10.1007\/s42486-023-00132-x","relation":{},"ISSN":["2524-521X","2524-5228"],"issn-type":[{"value":"2524-521X","type":"print"},{"value":"2524-5228","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,5,22]]},"assertion":[{"value":"7 January 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 April 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 May 
2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"On behalf of all authors, the corresponding author states that there is no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}