{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T13:00:38Z","timestamp":1743080438819,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":35,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819787913"},{"type":"electronic","value":"9789819787920"}],"license":[{"start":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T00:00:00Z","timestamp":1731110400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T00:00:00Z","timestamp":1731110400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-8792-0_17","type":"book-chapter","created":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T06:57:30Z","timestamp":1731049050000},"page":"239-253","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Behavior Capture Based Explainable Engagement Recognition"],"prefix":"10.1007","author":[{"given":"Yijun","family":"Bei","sequence":"first","affiliation":[]},{"given":"Songyuan","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Kewei","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Zunlei","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Yining","family":"Tong","sequence":"additional","affiliation":[]},{"given":"Weimin","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Lechao","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Xue","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,9]]},"reference":[{"key":"17_CR1","doi-asserted-by":"crossref","unstructured":"Abedi, A., Khan, S.S.: Improving state-of-the-art in detecting student engagement with Resnet and TCN hybrid network. In: 2021 18th Conference on Robots and Vision (CRV), pp. 151\u2013157. IEEE (2021)","DOI":"10.1109\/CRV52889.2021.00028"},{"key":"17_CR2","doi-asserted-by":"crossref","unstructured":"Baltru\u0161aitis, T., Robinson, P., Morency, L.P.: Openface: an open source facial behavior analysis toolkit. In: 2016 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1\u201310. IEEE (2016)","DOI":"10.1109\/WACV.2016.7477553"},{"key":"17_CR3","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML, vol.\u00a02, p.\u00a04 (2021)"},{"key":"17_CR4","doi-asserted-by":"crossref","unstructured":"Binh, H.T., Trung, N.Q., Nguyen, H.A.T., Duy, B.T.: Detecting student engagement in classrooms for intelligent tutoring systems. In: 2019 23rd International Computer Science and Engineering Conference (ICSEC), pp. 145\u2013149. IEEE (2019)","DOI":"10.1109\/ICSEC47112.2019.8974739"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Cao, Z., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2D pose estimation using part affinity fields. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7291\u20137299 (2017)","DOI":"10.1109\/CVPR.2017.143"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: European Conference on Computer Vision, pp. 213\u2013229. Springer, Berlin (2020)","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"17_CR7","doi-asserted-by":"crossref","unstructured":"Chang, C., Zhang, C., Chen, L., Liu, Y.: An ensemble model using face and body tracking for engagement detection. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 616\u2013622 (2018)","DOI":"10.1145\/3242969.3264986"},{"key":"17_CR8","doi-asserted-by":"crossref","unstructured":"Chefer, H., Gur, S., Wolf, L.: Transformer interpretability beyond attention visualization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 782\u2013791 (2021)","DOI":"10.1109\/CVPR46437.2021.00084"},{"key":"17_CR9","doi-asserted-by":"crossref","unstructured":"Dhall, A.: EmotiW 2019: automatic emotion, engagement and cohesion prediction tasks. In: 2019 International Conference on Multimodal Interaction, pp. 546\u2013550 (2019)","DOI":"10.1145\/3340555.3355710"},{"key":"17_CR10","doi-asserted-by":"crossref","unstructured":"Erhan, D., Szegedy, C., Toshev, A., Anguelov, D.: Scalable object detection using deep neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2147\u20132154 (2014)","DOI":"10.1109\/CVPR.2014.276"},{"key":"17_CR11","unstructured":"Gong, Y., Jia, Y., Leung, T., Toshev, A., Ioffe, S.: Deep convolutional ranking for multilabel image annotation (2013). arXiv:1312.4894"},{"key":"17_CR12","doi-asserted-by":"crossref","unstructured":"Guo, D., Li, K., Hu, B., Zhang, Y., Wang, M.: Benchmarking micro-action recognition: dataset, method, and application. IEEE Trans. Circuits Syst. Video Technol. (2024)","DOI":"10.1109\/TCSVT.2024.3358415"},{"key":"17_CR13","unstructured":"Gupta, A., D\u2019Cunha, A., Awasthi, K., Balasubramanian, V.: Daisee: towards user engagement recognition in the wild (2016). arXiv:1609.01885"},{"key":"17_CR14","doi-asserted-by":"crossref","unstructured":"Huang, T., Mei, Y., Zhang, H., Liu, S., Yang, H.: Fine-grained engagement recognition in online learning environment. In: 2019 IEEE 9th International Conference on Electronics Information and Emergency Communication (ICEIEC), pp. 338\u2013341. IEEE (2019)","DOI":"10.1109\/ICEIEC.2019.8784559"},{"key":"17_CR15","first-page":"13352","volume":"34","author":"S Hwang","year":"2021","unstructured":"Hwang, S., Heo, M., Oh, S.W., Kim, S.J.: Video instance segmentation using inter-frame communication transformers. Adv. Neural. Inf. Process. Syst. 34, 13352\u201313363 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"1\u20132","key":"17_CR16","doi-asserted-by":"publisher","first-page":"83","DOI":"10.1002\/nav.3800020109","volume":"2","author":"HW Kuhn","year":"1955","unstructured":"Kuhn, H.W.: The Hungarian method for the assignment problem. Nav. Res. Logist. Q. 2(1\u20132), 83\u201397 (1955)","journal-title":"Nav. Res. Logist. Q."},{"issue":"10","key":"17_CR17","doi-asserted-by":"publisher","first-page":"6609","DOI":"10.1007\/s10489-020-02139-8","volume":"51","author":"J Liao","year":"2021","unstructured":"Liao, J., Liang, Y., Pan, J.: Deep facial spatiotemporal network for engagement prediction in online learning. Appl. Intell. 51(10), 6609\u20136621 (2021)","journal-title":"Appl. Intell."},{"key":"17_CR18","doi-asserted-by":"crossref","unstructured":"Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: SSD: single shot multibox detector. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11\u201314, 2016, Proceedings, Part I 14, pp. 21\u201337. Springer, Berlin (2016)","DOI":"10.1007\/978-3-319-46448-0_2"},{"issue":"12","key":"17_CR19","doi-asserted-by":"publisher","first-page":"13803","DOI":"10.1007\/s10489-022-03200-4","volume":"52","author":"NK Mehta","year":"2022","unstructured":"Mehta, N.K., Prasad, S.S., Saurav, S., Saini, R., Singh, S.: Three-dimensional denseNet self-attention neural network for automatic detection of student\u2019s engagement. Appl. Intell. 52(12), 13803\u201313823 (2022)","journal-title":"Appl. Intell."},{"key":"17_CR20","doi-asserted-by":"crossref","unstructured":"Neimark, D., Bar, O., Zohar, M., Asselmann, D.: Video transformer network. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3163\u20133172 (2021)","DOI":"10.1109\/ICCVW54120.2021.00355"},{"key":"17_CR21","doi-asserted-by":"crossref","unstructured":"Parmar, P., Tran\u00a0Morris, B.: Learning to score Olympic events. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 20\u201328 (2017)","DOI":"10.1109\/CVPRW.2017.16"},{"key":"17_CR22","doi-asserted-by":"crossref","unstructured":"Saleh, K., Yu, K., Chen, F.: Video-based student engagement estimation via time convolution neural networks for remote learning. In: Australasian Joint Conference on Artificial Intelligence, pp. 658\u2013667. Springer, Berlin (2022)","DOI":"10.1007\/978-3-030-97546-3_53"},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Sharma, P., Joshi, S., Gautam, S., Maharjan, S., Khanal, S.R., Reis, M.C., Barroso, J., de\u00a0Jesus\u00a0Filipe, V.M.: Student engagement detection using emotion analysis, eye tracking and head movement with machine learning. In: International Conference on Technology and Innovation in Learning, Teaching and Education, pp. 52\u201368. Springer, Berlin (2022)","DOI":"10.1007\/978-3-031-22918-3_5"},{"key":"17_CR24","doi-asserted-by":"crossref","unstructured":"Stewart, R., Andriluka, M., Ng, A.Y.: End-to-end people detection in crowded scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2325\u20132333 (2016)","DOI":"10.1109\/CVPR.2016.255"},{"issue":"10","key":"17_CR25","doi-asserted-by":"publisher","first-page":"952","DOI":"10.1089\/tmj.2018.0205","volume":"25","author":"D Su","year":"2019","unstructured":"Su, D., Michaud, T.L., Estabrooks, P., Schwab, R.J., Eiland, L.A., Hansen, G., DeVany, M., Zhang, D., Li, Y., Pag\u00e1n, J.A., et al.: Diabetes management through remote patient monitoring: the importance of patient activation and engagement with the technology. Telemed. E-Health 25(10), 952\u2013959 (2019)","journal-title":"Telemed. E-Health"},{"key":"17_CR26","doi-asserted-by":"crossref","unstructured":"Tian, X., Nunes, B.P., Liu, Y., Manrique, R.: Predicting student engagement using sequential ensemble model. IEEE Trans. Learn. Technol. (2023)","DOI":"10.1109\/TLT.2023.3342860"},{"key":"17_CR27","unstructured":"Vinyals, O., Bengio, S., Kudlur, M.: Order matters: Sequence to sequence for sets (2015). arXiv:1511.06391"},{"key":"17_CR28","doi-asserted-by":"crossref","unstructured":"Wang, J., Yang, Y., Mao, J., Huang, Z., Huang, C., Xu, W.: CNN-RNN: a unified framework for multi-label image classification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2285\u20132294 (2016)","DOI":"10.1109\/CVPR.2016.251"},{"key":"17_CR29","unstructured":"Wei, Y., Xia, W., Huang, J., Ni, B., Dong, J., Zhao, Y., Yan, S.: CNN: single-label to multi-label (2014). arXiv:1406.5726"},{"key":"17_CR30","doi-asserted-by":"crossref","unstructured":"Wu, J., Yang, B., Wang, Y., Hattori, G.: Advanced multi-instance learning method with multi-features engineering and conservative optimization for engagement intensity prediction. In: Proceedings of the 2020 International Conference on Multimodal Interaction, pp. 777\u2013783 (2020)","DOI":"10.1145\/3382507.3417959"},{"key":"17_CR31","doi-asserted-by":"crossref","unstructured":"Yang, J., Wang, K., Peng, X., Qiao, Y.: Deep recurrent multi-instance learning with Spatio-temporal features for engagement intensity prediction. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 594\u2013598 (2018)","DOI":"10.1145\/3242969.3264981"},{"key":"17_CR32","first-page":"11384","volume":"34","author":"X Zha","year":"2021","unstructured":"Zha, X., Zhu, W., Xun, L., Yang, S., Liu, J.: Shifted chunk transformer for Spatio-temporal representational learning. Adv. Neural. Inf. Process. Syst. 34, 11384\u201311396 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"17_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, H., Cheng, L., Hao, Y., Ngo, C.w.: Long-term leap attention, short-term periodic shift for video classification. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 5773\u20135782 (2022)","DOI":"10.1145\/3503161.3547908"},{"key":"17_CR34","doi-asserted-by":"crossref","unstructured":"Zhang, H., Xiao, X., Huang, T., Liu, S., Xia, Y., Li, J.: An novel end-to-end network for automatic student engagement recognition. In: 2019 IEEE 9th International Conference on Electronics Information and Emergency Communication (ICEIEC), pp. 342\u2013345. IEEE (2019)","DOI":"10.1109\/ICEIEC.2019.8784507"},{"key":"17_CR35","doi-asserted-by":"crossref","unstructured":"Zhu, B., Lan, X., Guo, X., Barner, K.E., Boncelet, C.: Multi-rate attention based GRU model for engagement prediction. In: Proceedings of the 2020 International Conference on Multimodal Interaction, pp. 841\u2013848 (2020)","DOI":"10.1145\/3382507.3417965"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-8792-0_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T07:08:42Z","timestamp":1731049722000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-8792-0_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,9]]},"ISBN":["9789819787913","9789819787920"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-8792-0_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,9]]},"assertion":[{"value":"9 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Urumqi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2024.prcv.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}