{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,16]],"date-time":"2025-10-16T00:55:19Z","timestamp":1760576119851,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":24,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T00:00:00Z","timestamp":1730678400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Coordena\u00e7\u00e3o de Aperfei\u00e7oamento de Pessoal de Nivel Superior \u00f0 Brasil (CAPES)","award":["Finance Code 01"],"award-info":[{"award-number":["Finance Code 01"]}]},{"name":"Brazilian Ministry of Science, Technology, and Innovation (MCTI)","award":["DOU 01245.003479\/2024-1"],"award-info":[{"award-number":["DOU 01245.003479\/2024-1"]}]},{"name":"S\u00e3o Paulo Research Foundation (FAPESP)","award":["2020\/09838-0"],"award-info":[{"award-number":["2020\/09838-0"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,11,4]]},"DOI":"10.1145\/3686215.3688823","type":"proceedings-article","created":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T12:17:01Z","timestamp":1730290621000},"page":"170-174","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Benchmarking Speech-Driven Gesture Generation Models for Generalization to Unseen Voices and Noisy Environments"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-0468-6659","authenticated-orcid":false,"given":"Johsac Isbac Gomez","family":"Sanchez","sequence":"first","affiliation":[{"name":"Depto. de Engenharia de Computa\u00e7\u00e3o e Automa\u00e7\u00e3o (DCA), Faculdade de Engenharia El\u00e9trica e de Computa\u00e7\u00e3o (FEEC), Universidade Estadual de Campinas (UNICAMP), Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-6349-6379","authenticated-orcid":false,"given":"Kevin Adier Inofuente","family":"Colque","sequence":"additional","affiliation":[{"name":"Depto. de Engenharia de Computa\u00e7\u00e3o e Automa\u00e7\u00e3o (DCA), Faculdade de Engenharia El\u00e9trica e de Computa\u00e7\u00e3o (FEEC), Universidade Estadual de Campinas (UNICAMP), Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8821-4972","authenticated-orcid":false,"given":"Leonardo Boulitreau de Menezes Martins","family":"Marques","sequence":"additional","affiliation":[{"name":"CPQD, Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1534-5744","authenticated-orcid":false,"given":"Paula Dornhofer Paro","family":"Costa","sequence":"additional","affiliation":[{"name":"Depto. de Engenharia de Computa\u00e7\u00e3o e Automa\u00e7\u00e3o (DCA), Faculdade de Engenharia El\u00e9trica e de Computa\u00e7\u00e3o (FEEC), Universidade Estadual de Campinas (UNICAMP), Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5800-654X","authenticated-orcid":false,"given":"Rodolfo Luis","family":"Tonoli","sequence":"additional","affiliation":[{"name":"Depto. 
de Engenharia de Computa\u00e7\u00e3o e Automa\u00e7\u00e3o (DCA), Faculdade de Engenharia El\u00e9trica e de Computa\u00e7\u00e3o (FEEC), Universidade Estadual de Campinas (UNICAMP), Brazil"}]}],"member":"320","published-online":{"date-parts":[[2024,11,4]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58523-5_15"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"e_1_3_2_1_3_1","volume-title":"Zero-Shot Style Transfer for Gesture Animation driven by Text and Speech using Adversarial Disentanglement of Multimodal Style Encoding. arXiv preprint arXiv:2208.01917","author":"Fares Mireille","year":"2022","unstructured":"Mireille Fares, Michele Grimaldi, Catherine Pelachaud, and Nicolas Obin. 2022. Zero-Shot Style Transfer for Gesture Animation driven by Text and Speech using Adversarial Disentanglement of Multimodal Style Encoding. arXiv preprint arXiv:2208.01917 (2022)."},{"key":"e_1_3_2_1_4_1","volume-title":"ZeroEGGS: Zero-shot Example-based Gesture Generation from Speech. arXiv preprint arXiv:2209.07556","author":"Ghorbani Saeed","year":"2022","unstructured":"Saeed Ghorbani, Ylva Ferstl, Daniel Holden, Nikolaus\u00a0F Troje, and Marc-Andr\u00e9 Carbonneau. 2022. ZeroEGGS: Zero-shot Example-based Gesture Generation from Speech. arXiv preprint arXiv:2209.07556 (2022)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00361"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475437"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461329"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953152"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3577190.3616120"},{"key":"e_1_3_2_1_10_1","volume-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision. 763\u2013772","author":"Lee Gilwoo","year":"2019","unstructured":"Gilwoo Lee, Zhiwei Deng, Shugao Ma, Takaaki Shiratori, Siddhartha\u00a0S Srinivasa, and Yaser Sheikh. 2019. Talking with hands 16.2 m: A large-scale dataset of synchronized body-finger motion and audio for conversational motion analysis and synthesis. In Proceedings of the IEEE\/CVF International Conference on Computer Vision. 763\u2013772."},{"key":"e_1_3_2_1_11_1","volume-title":"2020 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC). IEEE, 514\u2013519","author":"Lu Junchen","year":"2020","unstructured":"Junchen Lu, Kun Zhou, Berrak Sisman, and Haizhou Li. 2020. Vaw-gan for singing voice conversion with non-parallel training data. In 2020 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC). IEEE, 514\u2013519."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","unstructured":"Ziqian Ning Qicong Xie Pengcheng Zhu Zhichao Wang Liumeng Xue Jixun Yao Lei Xie and Mengxiao Bi. 2023. Expressive-VC: Highly Expressive Voice Conversion with Attention Fusion of Bottleneck and Perturbation Features. In ICASSP 2023 - 2023 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP). 1\u20135. https:\/\/doi.org\/10.1109\/ICASSP49357.2023.10096057","DOI":"10.1109\/ICASSP49357.2023.10096057"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054734"},{"key":"e_1_3_2_1_14_1","volume-title":"International Conference on Machine Learning. 
PMLR, 28492\u201328518","author":"Radford Alec","year":"2023","unstructured":"Alec Radford, Jong\u00a0Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2023. Robust speech recognition via large-scale weak supervision. In International Conference on Machine Learning. PMLR, 28492\u201328518."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3610661.3616554"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746484"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462665"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448208"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.21437\/ASVSPOOF.2021-8"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447978"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3577190.3616114"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3414685.3417838"},{"key":"e_1_3_2_1_23_1","volume-title":"Speech-driven Personalized Gesture Synthetics: Harnessing Automatic Fuzzy Feature Inference","author":"Zhang Fan","year":"2024","unstructured":"Fan Zhang, Zhaohan Wang, Xin Lyu, Siyuan Zhao, Mengjian Li, Weidong Geng, Naye Ji, Hui Du, Fuxing Gao, Hao Wu, 2024. Speech-driven Personalized Gesture Synthetics: Harnessing Automatic Fuzzy Feature Inference. IEEE Transactions on Visualization and Computer Graphics (2024)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413391"}],"event":{"name":"ICMI '24: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction"],"location":"San Jose Costa Rica","acronym":"ICMI '24"},"container-title":["Companion Proceedings of the 26th International Conference on Multimodal Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3686215.3688823","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3686215.3688823","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,15]],"date-time":"2025-10-15T16:22:41Z","timestamp":1760545361000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3686215.3688823"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,4]]},"references-count":24,"alternative-id":["10.1145\/3686215.3688823","10.1145\/3686215"],"URL":"https:\/\/doi.org\/10.1145\/3686215.3688823","relation":{},"subject":[],"published":{"date-parts":[[2024,11,4]]},"assertion":[{"value":"2024-11-04","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
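The record above is the Crossref REST API "work" object for DOI 10.1145/3686215.3688823. Below is a minimal sketch of how such a record might be retrieved and a one-line citation assembled from it. It assumes the public api.crossref.org endpoint and the Python requests library; the field names used (title, author, container-title, published, DOI) are taken directly from the record above, and the exact formatting of the printed citation is illustrative only.

```python
# Minimal sketch: fetch a Crossref work record and extract a few fields.
# Assumes the public Crossref REST API (https://api.crossref.org/works/{doi})
# and the third-party "requests" package; field names mirror the record above.
import requests

DOI = "10.1145/3686215.3688823"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # envelope: {"status": "ok", ..., "message": {...}}

title = work["title"][0]                      # paper title
venue = work["container-title"][0]            # proceedings / journal name
year = work["published"]["date-parts"][0][0]  # first date part is the year
authors = [
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
]

print(f"{', '.join(authors)} ({year}). {title}. In {venue}. https://doi.org/{DOI}")
```

For this DOI the sketch would print the author list, the 2024 publication year, the paper title "Benchmarking Speech-Driven Gesture Generation Models for Generalization to Unseen Voices and Noisy Environments", and the ICMI '24 companion proceedings as the venue, all drawn from the same fields shown in the record above.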