{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T06:47:20Z","timestamp":1771915640696,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":25,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819500055","type":"print"},{"value":"9789819500062","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-95-0006-2_17","type":"book-chapter","created":{"date-parts":[[2025,7,24]],"date-time":"2025-07-24T07:33:31Z","timestamp":1753342411000},"page":"198-209","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Non-invasive Emotion Perception from Gait by Sparse and Spatial-Temporal Excitation Based Graph Convolutional Network"],"prefix":"10.1007","author":[{"given":"Liangyu","family":"Lu","sequence":"first","affiliation":[]},{"given":"Chengju","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,25]]},"reference":[{"key":"17_CR1","doi-asserted-by":"crossref","unstructured":"Yan, S., Xiong, Y., Lin, D.: Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition (2018)","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"17_CR2","doi-asserted-by":"publisher","first-page":"437","DOI":"10.1016\/j.neucom.2022.09.061","volume":"511","author":"S Chai","year":"2022","unstructured":"Chai, S., et al.: A multi-head pseudo nodes based spatial\u2013temporal graph convolutional network for emotion perception from GAIT. Neurocomputing 511, 437\u2013447 (2022)","journal-title":"Neurocomputing"},{"key":"17_CR3","doi-asserted-by":"publisher","first-page":"8561","DOI":"10.1609\/aaai.v33i01.33018561","volume":"33","author":"B Li","year":"2019","unstructured":"Li, B., Li, X., Zhang, Z., Wu, F.: Spatio-temporal graph routing for skeleton-based action recognition. AAAI 33, 8561\u20138568 (2019)","journal-title":"AAAI"},{"key":"17_CR4","doi-asserted-by":"crossref","unstructured":"Li, M., Chen, S., Chen, X., Zhang, Y., Wang, Y., Tian, Q.: Actional-structural graph convolutional networks for skeleton-based action recognition. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA. IEEE (2019)","DOI":"10.1109\/CVPR.2019.00371"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Shi, L., Zhang, Y., Cheng, J., Lu, H.: Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA. pp. 12018\u201312027. 
IEEE (2019)","DOI":"10.1109\/CVPR.2019.01230"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Y., Zhang, Z., Yuan, C., Li, B., Deng, Y., Hu, W.: Channel-wise Topology Refinement Graph Convolution for Skeleton-Based Action Recognition (2021)","DOI":"10.1109\/ICCV48922.2021.01311"},{"key":"17_CR7","doi-asserted-by":"crossref","unstructured":"Zhuang, Y., Lin, L., Tong, R., Liu, J., Iwamot, Y., Chen, Y.-W.: G-GCSN: Global Graph Convolution Shrinkage Network for Emotion Perception from Gait (2020)","DOI":"10.1007\/978-3-030-69756-3_4"},{"key":"17_CR8","doi-asserted-by":"publisher","first-page":"31581","DOI":"10.1007\/s11042-019-07959-6","volume":"78","author":"G Yolcu","year":"2019","unstructured":"Yolcu, G., et al.: Facial expression recognition for monitoring neurological disorders based on convolutional neural network. Multimed Tools Appl. 78, 31581\u201331603 (2019)","journal-title":"Multimed Tools Appl."},{"key":"17_CR9","doi-asserted-by":"publisher","unstructured":"Gajjala, V.R., Reddy, S.P.T., Mukherjee, S., Dubey, S.R.: MERANet: facial micro-expression recognition using 3D residual attention network. Presented at the December 19 (2021). https:\/\/doi.org\/10.1145\/3490035.3490260","DOI":"10.1145\/3490035.3490260"},{"key":"17_CR10","doi-asserted-by":"publisher","first-page":"2583","DOI":"10.1109\/TPAMI.2018.2791608","volume":"40","author":"W Li","year":"2018","unstructured":"Li, W., Abtahi, F., Zhu, Z., Yin, L.: EAC-Net: deep nets with enhancing and cropping for facial action unit detection. IEEE Trans. Pattern Anal. Mach. Intell. 40, 2583\u20132596 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"17_CR11","doi-asserted-by":"crossref","unstructured":"Zhao, K., Chu, W.-S., Zhang, H.: Deep region and multi-label learning for facial action unit detection. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR),Las Vegas, NV, USA, pp. 3391\u20133399. IEEE (2016)","DOI":"10.1109\/CVPR.2016.369"},{"key":"17_CR12","doi-asserted-by":"crossref","unstructured":"Schnell, B., Garner, P.N.: Improving emotional TTS with an emotion intensity input from unsupervised extraction. In: 11th ISCA Speech Synthesis Workshop (SSW 11), pp. 60\u201365. ISCA (2021)","DOI":"10.21437\/SSW.2021-11"},{"key":"17_CR13","doi-asserted-by":"crossref","unstructured":"Zhu, X., Yang, S., Yang, G., Xie, L.: Controlling emotion strength with relative attribute for end-to-end speech synthesis. In: 2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), SG, Singapore, pp. 192\u2013199. IEEE (2019)","DOI":"10.1109\/ASRU46091.2019.9003829"},{"key":"17_CR14","doi-asserted-by":"crossref","unstructured":"Randhavane, T., et al.: Learning gait emotions using affective and deep features. In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1\u201310 (2022)","DOI":"10.1145\/3561975.3562957"},{"key":"17_CR15","unstructured":"Long, N.H.B.: STEP CATFormer: Spatial-Temporal Effective Body-Part Cross Attention Transformer for Skeleton-based Action Recognition (2023)"},{"key":"17_CR16","doi-asserted-by":"crossref","unstructured":"Wang, S., Pan, J., Huang, B., Liu, P., Li, Z., Zhou, C.: ICE-GCN: an interactional channel excitation-enhanced graph convolutional network for skeleton-based action recognition. Mach. Vis. Appl. 
34 (2023)","DOI":"10.1007\/s00138-023-01386-2"},{"key":"17_CR17","doi-asserted-by":"publisher","first-page":"1342","DOI":"10.1609\/aaai.v34i02.5490","volume":"34","author":"U Bhattacharya","year":"2020","unstructured":"Bhattacharya, U., Mittal, T., Chandra, R., Randhavane, T., Bera, A., Manocha, D.: STEP: spatial temporal graph convolutional networks for emotion perception from gaits. AAAI 34, 1342\u20131350 (2020)","journal-title":"AAAI"},{"key":"17_CR18","doi-asserted-by":"crossref","unstructured":"Bhattacharya, U., et al.: Take an emotion walk: perceiving emotions from gaits using hierarchical attention pooling and affective mapping. Presented at the (2020)","DOI":"10.1007\/978-3-030-58607-2_9"},{"key":"17_CR19","doi-asserted-by":"crossref","unstructured":"Hu, C., Sheng, W., Dong, B., Li, X.: TNTC: two-stream network with transformer-based complementarity for gait-based emotion recognition. In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Singapore, Singapore, pp. 3229\u20133233. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9746047"},{"key":"17_CR20","doi-asserted-by":"crossref","unstructured":"Narayanan, V., Manoghar, B.M., Dorbala, V.S., Manocha, D., Bera, A.: ProxEmo: Gait-based Emotion Learning and Multi-view Proxemic Fusion for Socially-Aware Robot Navigation (2020). http:\/\/arxiv.org\/abs\/2003.01062","DOI":"10.1109\/IROS45743.2020.9340710"},{"key":"17_CR21","doi-asserted-by":"crossref","unstructured":"Chen, C., Sun, X.: STA-GCN: spatial temporal adaptive graph convolutional network for gait emotion recognition. In: 2023 IEEE International Conference on Multimedia and Expo (ICME), Brisbane, Australia, pp. 1385\u20131390. IEEE (2023)","DOI":"10.1109\/ICME55011.2023.00240"},{"key":"17_CR22","doi-asserted-by":"publisher","first-page":"101150","DOI":"10.1016\/j.cogsys.2023.101150","volume":"82","author":"X Chen","year":"2023","unstructured":"Chen, X., Liu, Z., Xiao, J., Liu, T., Zhao, Y.: DDG: dependency-difference gait based on emotional information attention for perceiving emotions from gait. Cogn. Syst. Res. 82, 101150 (2023)","journal-title":"Cogn. Syst. Res."},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Hou, Q., Zhang, L., Cheng, M.-M., Feng, J.: Strip pooling: rethinking spatial pooling for scene parsing. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Seattle, WA, USA. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00406"},{"key":"17_CR24","doi-asserted-by":"crossref","unstructured":"Lima, M.L., De Lima Costa, W., Mart\u00ednez, E.T., Teichrieb, V.: ST-Gait++: leveraging spatio-temporal convolutions for gait-based emotion recognition on videos. In: 2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), Seattle, WA, USA. pp. 302\u2013310. IEEE (2024)","DOI":"10.1109\/CVPRW63382.2024.00035"},{"key":"17_CR25","unstructured":"Lu, H., Xu, S., Zhao, S., Hu, X., Ma, R., Hu, B.: EPIC: emotion perception by spatio-temporal interaction context of gait. IEEE J. Biomed. Health Inform. 
1\u201310 (2023)"}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-0006-2_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T02:59:06Z","timestamp":1771901946000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-0006-2_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819500055","9789819500062"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-0006-2_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"25 July 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ningbo","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 July 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 July 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/icg\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
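
The record above is a raw work response from the Crossref REST API ("message-type": "work", "source": "Crossref"): the bibliographic payload sits under the "message" key, with "title", "author", "issued", and the deposited "reference" array as subfields. As a minimal Python sketch of how such a record can be fetched and read, the snippet below queries the public api.crossref.org endpoint for the chapter's DOI; the endpoint and envelope match the record shown, but field presence varies across works, so every lookup is defensive. This is an illustration, not part of the deposited metadata.

import json
from urllib.request import urlopen

DOI = "10.1007/978-981-95-0006-2_17"  # DOI taken from the record above

# The Crossref REST API wraps each work record in {"status": ..., "message": {...}}.
with urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

# Fields such as "title", "author", and "issued" mirror the record above;
# .get() defaults guard against works where a given field is absent.
title = work.get("title", ["<untitled>"])[0]
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
year = work.get("issued", {}).get("date-parts", [[None]])[0][0]

print(f"{authors} ({year}). {title}.")
print(f"https://doi.org/{work['DOI']}  ({work.get('references-count', 0)} references)")

Run against this DOI, the script would print the chapter's author list, year, title, resolvable DOI link, and the 25 deposited references, matching the fields visible in the record above.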