{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T18:13:26Z","timestamp":1764785606626,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":51,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,3,31]],"date-time":"2025-03-31T00:00:00Z","timestamp":1743379200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-sa\/4.0\/"}],"funder":[{"name":"Bundesministerium f\u00fcr Bildung und Forschung (BMBF)"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,3,31]]},"DOI":"10.1145\/3712676.3718344","type":"proceedings-article","created":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T13:13:23Z","timestamp":1742994803000},"page":"291-297","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["AMIS: An Audiovisual Dataset for Multimodal XR Research"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-7394-534X","authenticated-orcid":false,"given":"Abhinav","family":"Bhattacharya","sequence":"first","affiliation":[{"name":"Audiovisual Technology Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4537-5183","authenticated-orcid":false,"given":"Lu\u00eds Fernando de Souza","family":"Cardoso","sequence":"additional","affiliation":[{"name":"Virtual Worlds and Digital Games Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-8282-8024","authenticated-orcid":false,"given":"Andy","family":"Schleising","sequence":"additional","affiliation":[{"name":"Virtual Worlds and Digital Games Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0399-8294","authenticated-orcid":false,"given":"Gareth","family":"Rendle","sequence":"additional","affiliation":[{"name":"Virtual Reality and Visualization Research Group, Bauhaus-Universit\u00e4t Weimar, Germany, Weimar, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5032-7613","authenticated-orcid":false,"given":"Adrian","family":"Kreskowski","sequence":"additional","affiliation":[{"name":"Virtual Reality and Visualization Research Group, Bauhaus-Universit\u00e4t Weimar, Germany, Weimar, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-4777-7560","authenticated-orcid":false,"given":"Felix","family":"Immohr","sequence":"additional","affiliation":[{"name":"Audiovisual Technology Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7069-1543","authenticated-orcid":false,"given":"Rakesh Rao Ramachandra","family":"Rao","sequence":"additional","affiliation":[{"name":"Audiovisual Technology Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7483-1550","authenticated-orcid":false,"given":"Wolfgang","family":"Broll","sequence":"additional","affiliation":[{"name":"Virtual Worlds and Digital Games Group, TU Ilmenau, Germany, Ilmenau, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9357-1763","authenticated-orcid":false,"given":"Alexander","family":"Raake","sequence":"additional","affiliation":[{"name":"Audiovisual Technology Group, TU Ilmenau, Germany, Ilmenau, Germany"}]}],"member":"320","published-online":{"date-parts":[[2025,3,31]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Enas Altarawneh Ameeta Agrawal Michael Jenkin and Manos Papagelis. 
2023. Predicting Evoked Emotions in Conversations. arXiv:2401.00383 [cs.CL]"},{"key":"e_1_3_2_1_2_1","volume-title":"Insights into the Predictors of Empathy in Virtual Reality Environments. Information 14, 8","author":"Bacca-Acosta Jorge","year":"2023","unstructured":"Jorge Bacca-Acosta, Cecilia Avila-Garzon, and Myriam Sierra-Puentes. 2023. Insights into the Predictors of Empathy in Virtual Reality Environments. Information 14, 8 (2023)."},{"key":"e_1_3_2_1_3_1","volume-title":"2024 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW). 983--984","author":"Baffour Kwame Agyemang","year":"2024","unstructured":"Kwame Agyemang Baffour and Oyewole Oyekoya. 2024. Generating Look-Alike Avatars: Perception of Head Shape, Texture Fidelity and Head Orientation of Another User's Look-Alike Avatar. In 2024 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW). 983--984."},{"key":"e_1_3_2_1_4_1","volume-title":"The OMG-Emotion Behavior Dataset. In 2018 International Joint Conference on Neural Networks (IJCNN). 1--7.","author":"Barros Pablo","year":"2018","unstructured":"Pablo Barros, Nikhil Churamani, Egor Lakomkin, Henrique Siqueira, Alexander Sutherland, and Stefan Wermter. 2018. The OMG-Emotion Behavior Dataset. In 2018 International Joint Conference on Neural Networks (IJCNN). 1--7."},{"key":"e_1_3_2_1_5_1","volume-title":"The OMG-Empathy Dataset: Evaluating the Impact of Affective Behavior in Storytelling. In 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII). 1--7.","author":"Barros Pablo","year":"2019","unstructured":"Pablo Barros, Nikhil Churamani, Angelica Lim, and Stefan Wermter. 2019. The OMG-Empathy Dataset: Evaluating the Impact of Affective Behavior in Storytelling. In 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII). 1--7."},{"volume-title":"Proc. of the 19th ACM SIGGRAPH European Conf. on Visual Media Production","author":"Berghi Davide","key":"e_1_3_2_1_6_1","unstructured":"Davide Berghi, Marco Volino, and Philip J. B. Jackson. 2022. Tragic Talkers: A Shakespearean Sound- and Light-Field Dataset for Audio-Visual Machine Learning Research. In Proc. of the 19th ACM SIGGRAPH European Conf. on Visual Media Production (London, United Kingdom) (CVMP '22). Association for Computing Machinery, New York, NY, USA, Article 5, 8 pages."},{"key":"e_1_3_2_1_7_1","volume-title":"Samuel Kim, Jeannette Chang, Sungbok Lee, and Shrikanth Narayanan.","author":"Busso Carlos","year":"2008","unstructured":"Carlos Busso, Murtaza Bulut, Chi-Chun Lee, Abe Kazemzadeh, Emily Mower Provost, Samuel Kim, Jeannette Chang, Sungbok Lee, and Shrikanth Narayanan. 2008. IEMOCAP: Interactive emotional dyadic motion capture database. Language Resources and Evaluation 42 (12 2008), 335--359."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.21437\/ICSLP.1998-609"},{"key":"e_1_3_2_1_9_1","volume-title":"4th European Conference on Speech Communication and Technology.","author":"Chan Dominic","year":"1995","unstructured":"Dominic Chan, Adrian Fourcin, Dafydd Gibbon, Bj\u00f6rn Granstr\u00f6m, Mark Huckvale, Kokkinakis George, Knut Kvale, Lori Lamel, B\u00f8rge Lindberg, Asunci\u00f3n Moreno, Jiannis Mouropoulos, Francesco Senia, Isabel Trancoso, Corin Veld, and Jerome Zeiliger. 1995. EUROM - a spoken language resource for the EU - the SAM projects. 
In 4th European Conference on Speech Communication and Technology."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3026276"},{"key":"e_1_3_2_1_11_1","volume-title":"2015 IEEE International Conference on Image Processing (ICIP). 168--172","author":"Chen Chen","year":"2015","unstructured":"Chen Chen, Roozbeh Jafari, and Nasser Kehtarnavaz. 2015. UTD-MHAD: A multimodal dataset for human action recognition utilizing a depth camera and a wearable inertial sensor. In 2015 IEEE International Conference on Image Processing (ICIP). 168--172."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/237170.237269"},{"volume-title":"Advances in Neural Information Processing Systems","author":"DelPreto Joseph","key":"e_1_3_2_1_13_1","unstructured":"Joseph DelPreto, Chao Liu, Yiyue Luo, Michael Foshey, Yunzhu Li, Antonio Torralba, Wojciech Matusik, and Daniela Rus. 2022. ActionSense: A Multimodal Dataset and Recording Framework for Human Activities Using Wearable Sensors in a Kitchen Environment. In Advances in Neural Information Processing Systems, S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (Eds.), Vol. 35. Curran Associates, Inc., 13800--13813."},{"key":"e_1_3_2_1_14_1","volume-title":"2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW). IEEE, 1--6.","author":"Fela Randy F","year":"2022","unstructured":"Randy F Fela, Andr\u00e9as Pastor, Patrick Le Callet, Nick Zacharov, Toinon Vigier, and Soren Forchhammer. 2022. Perceptual evaluation on audio-visual dataset of 360 content. In 2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW). IEEE, 1--6."},{"key":"e_1_3_2_1_15_1","volume-title":"Schlittmeier","author":"Fels Janina","year":"2021","unstructured":"Janina Fels, Cosima A. Ermert, Jonathan Ehret, Chinthusa Mohanathasan, Andrea B\u00f6nsch, Torsten W. Kuhlen, and Sabine J. Schlittmeier. 2021. Listening to, and Remembering Conversations between Two Talkers: Cognitive Research using Embodied Conversational Agents in Audiovisual Virtual Environments. In [Fortschritte der Akustik - DAGA 2021, Wien, Austria] (2021-08-15). 47. Jahrestagung f\u00fcr Akustik, Wien (Austria), 15 Aug 2021 - 18 Aug 2021, Deutsche Gesellschaft f\u00fcr Akustik e.V. (DEGA), Berlin, 1328--1331."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3573381.3596503"},{"key":"e_1_3_2_1_17_1","volume-title":"2024 16th International Conference on Quality of Multimedia Experience (QoMEX). 207--213","author":"Fremerey Stephan","year":"2024","unstructured":"Stephan Fremerey, Carolin Breuer, Larissa Leist, Maria Klatte, Janina Fels, and Alexander Raake. 2024. AVT-ECoClass-VR: An open-source audiovisual 360\u00b0 video and immersive CGI multi-talker dataset to evaluate cognitive performance. In 2024 16th International Conference on Quality of Multimedia Experience (QoMEX). 207--213."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"Ohad Fried Ayush Tewari Michael Zollh\u00f6fer Adam Finkelstein Eli Shechtman Dan B Goldman Kyle Genova Zeyu Jin Christian Theobalt and Maneesh Agrawala. 2019. Text-based Editing of Talking-head Video. arXiv:1906.01524 [cs.CV]","DOI":"10.1145\/3306346.3323028"},{"key":"e_1_3_2_1_19_1","unstructured":"Scott Geng Revant Teotia Purva Tendulkar Sachit Menon and Carl Vondrick. 2023. Affective Faces for Goal-Driven Dyadic Communication. arXiv:2301.10939 [cs.CV]"},{"key":"e_1_3_2_1_20_1","unstructured":"Bo Han Heqing Zou Haoyang Li Guangcong Wang and Chng Eng Siong. 
2024. Text-based Talking Video Editing with Cascaded Conditional Diffusion. arXiv:2407.14841 [cs.CV]"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2018.00108"},{"key":"e_1_3_2_1_22_1","volume-title":"RWTH Aachen University","author":"Institute for Hearing Technology and Acoustics","year":"2020","unstructured":"Institute for Hearing Technology and Acoustics, RWTH Aachen University. 2020. Virtual Acoustics - A real-time auralization framework for scientific research. http:\/\/www.virtualacoustics.org\/. Accessed on 2020-04-21."},{"key":"e_1_3_2_1_23_1","unstructured":"Yili Jin Kaiyuan Hu Junhua Liu Fangxin Wang and Xue Liu. 2024. From Capture to Display: A Survey on Volumetric Video. arXiv:2309.05658 [cs.MM]"},{"key":"e_1_3_2_1_24_1","volume-title":"Effects on Co-Presence of a Virtual Human: A Comparison of Display and Interaction Types. Electronics 11, 3","author":"Kim Daehwan","year":"2022","unstructured":"Daehwan Kim and Dongsik Jo. 2022. Effects on Co-Presence of a Virtual Human: A Comparison of Display and Interaction Types. Electronics 11, 3 (2022)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2020.3037360"},{"key":"e_1_3_2_1_26_1","volume-title":"ROVER: A Standalone Overlay Tool for Questionnaires in Virtual Reality. In Companion Proc. of the 16th ACM SIGCHI Symp. on Engineering Interactive Computing Systems","author":"K\u00fcntzer Lucas","year":"2024","unstructured":"Lucas K\u00fcntzer, Sandra U. Schwab, Heike Spaderna, and Georg Rock. 2024. ROVER: A Standalone Overlay Tool for Questionnaires in Virtual Reality. In Companion Proc. of the 16th ACM SIGCHI Symp. on Engineering Interactive Computing Systems (Cagliari, Italy) (EICS '24 Companion). Association for Computing Machinery, New York, NY, USA, 31--39."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2023.3320236"},{"volume-title":"Proceedings of the 14th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '87)","author":"William","key":"e_1_3_2_1_28_1","unstructured":"William E. Lorensen and Harvey E. Cline. 1987. Marching cubes: A high resolution 3D surface construction algorithm. In Proceedings of the 14th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '87). Association for Computing Machinery, New York, NY, USA, 163--169."},{"key":"e_1_3_2_1_29_1","unstructured":"Kasper Duemose Lund Axel Ahrens and Torsten Dau. 2019. A method for evaluating audio-visual scene analysis in multi-talker environments. NA."},{"key":"e_1_3_2_1_30_1","volume-title":"Kevin Bailey, David Soriano Fosas, C. Karen Liu, Ziwei Liu, Jakob Engel, Renzo De Nardi, and Richard Newcombe.","author":"Ma Lingni","year":"2024","unstructured":"Lingni Ma, Yuting Ye, Fangzhou Hong, Vladimir Guzov, Yifeng Jiang, Rowan Postyeni, Luis Pesqueira, Alexander Gamino, Vijay Baiyya, Hyo Jin Kim, Kevin Bailey, David Soriano Fosas, C. Karen Liu, Ziwei Liu, Jakob Engel, Renzo De Nardi, and Richard Newcombe. 2024. Nymeria: A Massive Collection of Multimodal Egocentric Daily Motion in the Wild. arXiv:2406.09905 [cs.CV]"},{"key":"e_1_3_2_1_31_1","volume-title":"EVOKE: Emotion Enabled Virtual Avatar Mapping Using Optimized Knowledge Distillation. In 2024 IEEE International Conference on Consumer Electronics (ICCE). 1--6.","author":"Nadeem Maryam","year":"2024","unstructured":"Maryam Nadeem, Raza Imam, Rouqaiah Al-Refai, Meriem Chkir, Mohamad Hoda, and Abdulmotaleb El Saddik. 2024. 
EVOKE: Emotion Enabled Virtual Avatar Mapping Using Optimized Knowledge Distillation. In 2024 IEEE International Conference on Consumer Electronics (ICCE). 1--6."},{"key":"e_1_3_2_1_32_1","volume-title":"Audio Engineering Society Convention 142","author":"Neidhardt Annika","year":"2017","unstructured":"Annika Neidhardt, Florian Klein, Niklas Knoop, and Thomas K\u00f6llmer. 2017. Flexible python tool for dynamic binaural synthesis applications. https:\/\/github.com\/tuil-emt\/pybinsim. In Audio Engineering Society Convention 142. Audio Engineering Society."},{"key":"e_1_3_2_1_33_1","volume-title":"ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 8210--8214","author":"Pastor Andreas","year":"2024","unstructured":"Andreas Pastor, Pierre Lebreton, Toinon Vigier, and Patrick Le Callet. 2024. Comparison of conditions for omnidirectional video with spatial audio in terms of subjective quality and impacts on objective metrics resolving power. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 8210--8214."},{"key":"e_1_3_2_1_34_1","volume-title":"Cristian Canton Ferrer, and Caner Hazirbas","author":"Porgali Bilal","year":"2023","unstructured":"Bilal Porgali, V\u00edtor Albiero, Jordan Ryda, Cristian Canton Ferrer, and Caner Hazirbas. 2023. The Casual Conversations v2 Dataset. arXiv:2303.04838 [cs.CV]"},{"key":"e_1_3_2_1_35_1","volume-title":"MELD: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations., 527--536 pages.","author":"Poria Soujanya","year":"2019","unstructured":"Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. 2019. MELD: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations., 527--536 pages."},{"volume-title":"2022 14th International Conference on Quality of Multimedia Experience (QoMEX). 1--6.","author":"Robotham Thomas","key":"e_1_3_2_1_36_1","unstructured":"Thomas Robotham, Ashutosh Singla, Olli S. Rummukainen, Alexander Raake, and Emanu\u00ebl A. P. Habets. 2022. Audiovisual Database with 360\u00b0 Video and Higher-Order Ambisonics Audio for Perception, Cognition, Behavior, and QoE Evaluation Research. In 2022 14th International Conference on Quality of Multimedia Experience (QoMEX). 1--6."},{"key":"e_1_3_2_1_37_1","volume-title":"An interactional view of social presence: Making the virtual other \"real\". Information Systems Journal 29 (12","author":"Schultze Ulrike","year":"2018","unstructured":"Ulrike Schultze and Jo Ann Brooks. 2018. An interactional view of social presence: Making the virtual other \"real\". Information Systems Journal 29 (12 2018)."},{"key":"e_1_3_2_1_38_1","volume-title":"Replay: Multi-modal Multi-view Acted Videos for Casual Holography. arXiv:2307.12067 [cs.CV]","author":"Shapovalov Roman","year":"2023","unstructured":"Roman Shapovalov, Yanir Kleiman, Ignacio Rocco, David Novotny, Andrea Vedaldi, Changan Chen, Filippos Kokkinos, Ben Graham, and Natalia Neverova. 2023. Replay: Multi-modal Multi-view Acted Videos for Casual Holography. arXiv:2307.12067 [cs.CV]"},{"key":"e_1_3_2_1_39_1","volume-title":"Saliency of Omnidirectional Videos with Different Audio Presentations: Analyses and Dataset. In 2023 15th International Conference on Quality of Multimedia Experience (QoMEX). 264--269","author":"Singla Ashutosh","year":"2023","unstructured":"Ashutosh Singla, Thomas Robotham, Abhinav Bhattacharya, William Menz, Emanu\u00ebl A. P. Habets, and Alexander Raake. 
2023. Saliency of Omnidirectional Videos with Different Audio Presentations: Analyses and Dataset. In 2023 15th International Conference on Quality of Multimedia Experience (QoMEX). 264--269."},{"key":"e_1_3_2_1_40_1","volume-title":"2022 14th International Conference on Quality of Multimedia Experience (QoMEX). 1--6.","author":"Spang Robert P.","year":"2022","unstructured":"Robert P. Spang, Jan-Niklas Voigt-Antons, and Sebastian M\u00f6ller. 2022. The Storytime Dataset: Simulated Videotelephony Clips for Quality Perception Research. In 2022 14th International Conference on Quality of Multimedia Experience (QoMEX). 1--6."},{"key":"e_1_3_2_1_41_1","volume-title":"Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)","author":"Tolins Jackson","year":"2016","unstructured":"Jackson Tolins, Kris Liu, Yingying Wang, Jean E. Fox Tree, Marilyn A. Walker, and Michael Neff. 2016. A Multimodal Motion-Captured Corpus of Matched and Mismatched Extravert-Introvert Conversational Pairs. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16). European Language Resources Association (ELRA), Portoro\u017e, Slovenia, 3469--3476."},{"key":"e_1_3_2_1_42_1","volume-title":"A Methodological Framework for Assessing Social Presence in Music Interactions in Virtual Reality. Frontiers in Psychology 12","author":"Kerrebroeck Bavo Van","year":"2021","unstructured":"Bavo Van Kerrebroeck, Giusy Caruso, and Pieter-Jan Maes. 2021. A Methodological Framework for Assessing Social Presence in Music Interactions in Virtual Reality. Frontiers in Psychology 12 (2021)."},{"volume-title":"Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)","author":"van Son Rob","key":"e_1_3_2_1_43_1","unstructured":"Rob van Son, Wieneke Wesseling, Eric Sanders, and Henk van den Heuvel. 2008. The IFADV Corpus: a Free Dialog Video Corpus. In Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08), Nicoletta Calzolari, Khalid Choukri, Bente Maegaard, Joseph Mariani, Jan Odijk, Stelios Piperidis, and Daniel Tapias (Eds.). European Language Resources Association (ELRA), Marrakech, Morocco."},{"key":"e_1_3_2_1_44_1","unstructured":"Jason Vandeventer Andrew J. Aubrey Paul L. Rosin and David Marshall. 2015. 4D Cardiff Conversation Database (4D CCDb): a 4D database of natural dyadic conversations. In Auditory-Visual Speech Processing. 157--162."},{"key":"e_1_3_2_1_45_1","volume-title":"Carlos Ribeiro, and Isabel Trancoso.","author":"Viana M.","year":"2002","unstructured":"M. Viana, Ciro Martins, M. Mascarenhas, Hugo Meinedo, Jo\u00e3o Neto, Carlos Ribeiro, and Isabel Trancoso. 2002. Spoken Language Corpora for Speech Recognition and Synthesis in European Portuguese. (02 2002)."},{"key":"e_1_3_2_1_46_1","first-page":"1","article-title":"A Conversation Analysis: The Use of Small-Talk","volume":"4","author":"Wahab Isnaeni","year":"2021","unstructured":"Isnaeni Wahab, Zul Astri, Novalia Tanasy, and Nurul Fachrunnisa. 2021. A Conversation Analysis: The Use of Small-Talk. Seltics Journal: Scope of English Language Teaching Literature and Linguistics 4, 1 (Jun. 
2021), 53--62.","journal-title":"Seltics Journal: Scope of English Language Teaching Literature and Linguistics"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1007\/s12144-023-04544-x"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2023.3247072"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"crossref","unstructured":"Ming Yan Yan Zhang Shuqiang Cai Shuqi Fan Xincheng Lin Yudi Dai Siqi Shen Chenglu Wen Lan Xu Yuexin Ma and Cheng Wang. 2024. RELI11D: A Comprehensive Multimodal Human Motion Dataset and Method. arXiv:2403.19501 [cs.CV]","DOI":"10.1109\/CVPR52733.2024.00219"},{"key":"e_1_3_2_1_50_1","unstructured":"Yichao Yan Zanwei Zhou Zi Wang Jingnan Gao and Xiaokang Yang. 2023. DialogueNeRF: Towards Realistic Avatar Face-to-Face Conversation Video Generation. arXiv:2203.07931 [cs.CV]"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"crossref","unstructured":"Jinming Zhao Tenggan Zhang Jingwen Hu Yuchen Liu Qin Jin Xinchao Wang and Haizhou Li. 2022. M3ED: Multi-modal Multi-scene Multi-label Emotional Dialogue Database. 5699--5710 pages.","DOI":"10.18653\/v1\/2022.acl-long.391"}],"event":{"name":"MMSys '25: 16th ACM Multimedia Systems Conference","sponsor":["SIGMM ACM Special Interest Group on Multimedia","SIGCOMM ACM Special Interest Group on Data Communication","SIGMOBILE ACM Special Interest Group on Mobility of Systems, Users, Data and Computing"],"location":"Stellenbosch South Africa","acronym":"MMSys '25"},"container-title":["Proceedings of the 16th ACM Multimedia Systems Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3712676.3718344","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:38Z","timestamp":1750295918000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3712676.3718344"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,31]]},"references-count":51,"alternative-id":["10.1145\/3712676.3718344","10.1145\/3712676"],"URL":"https:\/\/doi.org\/10.1145\/3712676.3718344","relation":{},"subject":[],"published":{"date-parts":[[2025,3,31]]},"assertion":[{"value":"2025-03-31","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
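
The record above is the raw JSON returned by the public Crossref REST API for this paper's DOI. As a minimal sketch, assuming network access and the standard Crossref endpoint https://api.crossref.org/works/{DOI}, the following Python snippet fetches the same record and reads the main fields; the field names (message, title, author, reference) are taken directly from the record shown above, not invented.

# Minimal sketch: retrieve and inspect this Crossref work record.
# Assumes the public Crossref REST API endpoint; the JSON layout
# (payload wrapped in "message") matches the record shown above.
import json
import urllib.request

DOI = "10.1145/3712676.3718344"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

work = record["message"]  # Crossref wraps the work in "message"
print(work["title"][0])   # "AMIS: An Audiovisual Dataset for Multimodal XR Research"

# Authors are objects with "given"/"family" names and optional ORCIDs.
for author in work.get("author", []):
    print(f'{author.get("given", "")} {author.get("family", "")}'.strip())

# The "reference" array mirrors the 51 entries embedded in the record
# (compare "reference-count" / "references-count" in the metadata).
print(len(work.get("reference", [])), "references")

Note that reference entries are heterogeneous: some carry only a DOI ("doi-asserted-by" plus "DOI"), others only an "unstructured" citation string, and several mix both, so any consumer should treat every per-entry field as optional, as the .get() calls above do.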