{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T12:01:14Z","timestamp":1768737674365,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":62,"publisher":"ACM",
"license":[{"start":{"date-parts":[[2023,9,19]],"date-time":"2023-09-19T00:00:00Z","timestamp":1695081600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],
"funder":[{"name":"Swedish Research Council","award":["VR-2020-0239"],"award-info":[{"award-number":["VR-2020-0239"]}]},{"name":"Swedish Research Council","award":["VR-2019-05003"],"award-info":[{"award-number":["VR-2019-05003"]}]},{"DOI":"10.13039\/501100004472","name":"Riksbankens Jubileumsfond","doi-asserted-by":"publisher","award":["P20-0298"],"award-info":[{"award-number":["P20-0298"]}],"id":[{"id":"10.13039\/501100004472","id-type":"DOI","asserted-by":"publisher"}]}],
"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,9,19]]},"DOI":"10.1145\/3570945.3607289","type":"proceedings-article","created":{"date-parts":[[2023,12,22]],"date-time":"2023-12-22T06:07:02Z","timestamp":1703225222000},"page":"1-9","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,
"title":["Generation of speech and facial animation with controllable articulatory effort for amusing conversational characters"],"prefix":"10.1145",
"author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0397-6442","authenticated-orcid":false,"given":"Joakim","family":"Gustafson","sequence":"first","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1175-840X","authenticated-orcid":false,"given":"\u00c9va","family":"Sz\u00e9kely","sequence":"additional","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1399-6604","authenticated-orcid":false,"given":"Jonas","family":"Beskow","sequence":"additional","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]}],
"member":"320","published-online":{"date-parts":[[2023,12,22]]},
"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1080\/088395199117333"},
{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3290607.3310422"},
{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/11550617_26"},
{"key":"e_1_3_2_1_4_1","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"Baevski Alexei","year":"2020","unstructured":"Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. 2020. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems 33 (2020), 12449--12460.","journal-title":"Advances in Neural Information Processing Systems"},
{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.21437\/Eurospeech.2003-259"},
{"key":"e_1_3_2_1_6_1","unstructured":"Elisabetta Bevacqua Ken Prepin Radoslaw Niewiadomski Etienne de Sevin and Catherine Pelachaud. [n. d.]. Greta: Towards an interactive conversational virtual companion. ([n. d.])."},
{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.intcom.2005.09.002"},
{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/302979.303150"},
{"key":"e_1_3_2_1_9_1","volume-title":"Embodied conversational agents: representation and intelligence in user interfaces. AI magazine 22, 4","author":"Cassell Justine","year":"2001","unstructured":"Justine Cassell. 2001. Embodied conversational agents: representation and intelligence in user interfaces. AI magazine 22, 4 (2001), 67--67."},
{"key":"e_1_3_2_1_10_1","volume-title":"A Vector Quantized Approach for Text to Speech Synthesis on Real-World Spontaneous Speech. arXiv preprint arXiv:2302.04215","author":"Chen Li-Wei","year":"2023","unstructured":"Li-Wei Chen, Shinji Watanabe, and Alexander Rudnicky. 2023. A Vector Quantized Approach for Text to Speech Synthesis on Real-World Spontaneous Speech. arXiv preprint arXiv:2302.04215 (2023)."},
{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1336"},
{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746395"},
{"key":"e_1_3_2_1_13_1","volume-title":"Towards human-like spoken dialogue systems. Speech communication 50, 8-9","author":"Edlund Jens","year":"2008","unstructured":"Jens Edlund, Joakim Gustafson, Mattias Heldner, and Anna Hjalmarsson. 2008. Towards human-like spoken dialogue systems. Speech communication 50, 8-9 (2008), 630--645."},
{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/2897824.2925984"},
{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00573"},
{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1007\/11550617_4"},
{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.21437\/Eurospeech.1999-295"},
{"key":"e_1_3_2_1_18_1","volume-title":"Proc. SSW.","author":"Gustafson Joakim","year":"2004","unstructured":"Joakim Gustafson and K\u00e5re Sj\u00f6lander. 2004. Voice creation for conversational fairy-tale characters. In Proc. SSW."},
{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/2493432.2493502"},
{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3462244.3479883"},
{"key":"e_1_3_2_1_21_1","volume-title":"Proc. LREC).","author":"Jonell Patrik","year":"2018","unstructured":"Patrik Jonell, Mattias Bystedt, Per Fallgren, Dimosthenis Kontogiorgos, Jos\u00e9 Lopes, Zofia Malisz, Samuel Mascarenhas, Catharine Oertel, Eran Raveh, and Todd Shore. 2018. Farmi: a framework for recording multi-modal interactions. In Proc. LREC)."},
{"key":"e_1_3_2_1_22_1","volume-title":"Workshop paper (TEI'10)","author":"Knight Heather","year":"2011","unstructured":"Heather Knight, Scott Satkin, Varun Ramakrishna, and Santosh Divvala. 2011. A savvy robot standup comic: Online learning through audience tracking. In Workshop paper (TEI'10)."},
{"key":"e_1_3_2_1_23_1","volume-title":"Proc. SSW.","author":"Kominek John","year":"2004","unstructured":"John Kominek and Alan W Black. 2004. The CMU Arctic speech databases. In Proc. SSW."},
{"key":"e_1_3_2_1_24_1","first-page":"17022","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"Kong Jungil","year":"2020","unstructured":"Jungil Kong, Jaehyeon Kim, and Jaekyoung Bae. 2020. HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis. In Proc. NeurIPS, Vol. 33. 17022--17033.","journal-title":"Proc. NeurIPS"},
{"key":"e_1_3_2_1_25_1","volume-title":"Proc. LREC. 119--127","author":"Kontogiorgos Dimosthenis","year":"2018","unstructured":"Dimosthenis Kontogiorgos, Vanya Avramova, Simon Alexanderson, Patrik Jonell, Catharine Oertel, Jonas Beskow, Gabriel Skantze, and Joakim Gustafson. 2018. A multimodal corpus for mutual gaze and joint attention in multiparty situated interaction. In Proc. LREC. 119--127."},
{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1007\/11550617_28"},
{"key":"e_1_3_2_1_27_1","volume-title":"The human takes it all: Humanlike synthesized voices are perceived as less eerie and more likable. evidence from a subjective ratings study. Frontiers in Neurorobotics","author":"K\u00fchne Katharina","year":"2020","unstructured":"Katharina K\u00fchne, Martin H Fischer, and Yuefang Zhou. 2020. The human takes it all: Humanlike synthesized voices are perceived as less eerie and more likable. evidence from a subjective ratings study. Frontiers in Neurorobotics (2020), 105."},
{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097200"},
{"key":"e_1_3_2_1_29_1","volume-title":"StyleTTS: A Style-Based Generative Model for Natural and Diverse Text-to-Speech Synthesis. arXiv preprint arXiv:2205.15439","author":"Li Yinghao Aaron","year":"2022","unstructured":"Yinghao Aaron Li, Cong Han, and Nima Mesgarani. 2022. StyleTTS: A Style-Based Generative Model for Natural and Diverse Text-to-Speech Synthesis. arXiv preprint arXiv:2205.15439 (2022)."},
{"key":"e_1_3_2_1_30_1","volume-title":"The production of speech","author":"Lindblom Bj\u00f6rn","unstructured":"Bj\u00f6rn Lindblom. 1983. Economy of speech gestures. In The production of speech. Springer, 217--245."},
{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2021.101255"},
{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1121\/1.1910340"},
{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/3383652.3423915"},
{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-020-00727-9"},
{"key":"e_1_3_2_1_35_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]"},
{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2023.3247101"},
{"key":"e_1_3_2_1_37_1","volume-title":"Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever.","author":"Radford Alec","year":"2022","unstructured":"Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. Robust speech recognition via large-scale weak supervision. arXiv preprint arXiv:2212.04356 (2022)."},
{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2861"},
{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1080\/088395199117315"},
{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1109\/VS-Games.2018.8493436"},
{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1145\/3386867"},
{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},
{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2016.11.001"},
{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15892-6_30"},
{"key":"e_1_3_2_1_45_1","volume-title":"Proc. LREC. 6368--6374","author":"Sz\u00e9kely \u00c9va","year":"2020","unstructured":"\u00c9va Sz\u00e9kely, Jens Edlund, and Joakim Gustafson. 2020. Augmented Prompt Selection for Evaluation of Spontaneous Speech Synthesis. In Proc. LREC. 6368--6374."},
{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683846"},
{"key":"e_1_3_2_1_47_1","volume-title":"Proc. Interspeech.","author":"Sz\u00e9kely \u00c9va","year":"2023","unstructured":"\u00c9va Sz\u00e9kely, Siyang Wang, and Joakim Gustafson. 2023. So-to-Speak: an exploratory platform for investigating the interplay between style and prosody in TTS. In Proc. Interspeech."},
{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073699"},
{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/VR51125.2022.00018"},
{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/267658.267823"},
{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW.2019.00069"},
{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.4108\/icst.pervasivehealth.2014.254943"},
{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053732"},
{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054556"},
{"key":"e_1_3_2_1_55_1","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Der Maaten Laurens Van","year":"2008","unstructured":"Laurens Van Der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-SNE. Journal of Machine Learning Research 9 (2008), 2579--2605.","journal-title":"Journal of Machine Learning Research"},
{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.21437\/SSW.2019-19"},
{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1145\/3462244.3479914"},
{"key":"e_1_3_2_1_58_1","volume-title":"International Conference on Machine Learning. 5180--5189","author":"Wang Yuxuan","year":"2018","unstructured":"Yuxuan Wang, Daisy Stanton, Yu Zhang, RJ-Skerry Ryan, Eric Battenberg, Joel Shor, Ying Xiao, Ye Jia, Fei Ren, and Rif A Saurous. 2018. Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis. In International Conference on Machine Learning. 5180--5189."},
{"key":"e_1_3_2_1_59_1","volume-title":"Prosodic patterns in English conversation","author":"Ward Nigel G","unstructured":"Nigel G Ward. 2019. Prosodic patterns in English conversation. Cambridge University Press."},
{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2009.05.006"},
{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-341"},
{"key":"e_1_3_2_1_62_1","volume-title":"Proc","author":"Zhu Hongbo","unstructured":"Hongbo Zhu, Chuang Yu, and Angelo Cangelosi. 2023. Affective Human-Robot Interaction with Multimodal Explanations. In Proc. ICSR. Springer, 241--252."}],
"event":{"name":"IVA '23: ACM International Conference on Intelligent Virtual Agents","location":"W\u00fcrzburg Germany","acronym":"IVA '23","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence"]},
"container-title":["Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents"],"original-title":[],
"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3570945.3607289","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3570945.3607289","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],
"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T14:25:58Z","timestamp":1755872758000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3570945.3607289"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,19]]},"references-count":62,"alternative-id":["10.1145\/3570945.3607289","10.1145\/3570945"],"URL":"https:\/\/doi.org\/10.1145\/3570945.3607289","relation":{},"subject":[],"published":{"date-parts":[[2023,9,19]]},"assertion":[{"value":"2023-12-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}