{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:05:21Z","timestamp":1775199921280,"version":"3.50.1"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434687","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing In-the-Wild Speech Emotion Conversion with Resynthesis-based Duration Modeling"],"prefix":"10.1109","author":[{"given":"Navin Raj","family":"Prabhu","sequence":"first","affiliation":[{"name":"University of Hamburg,Signal Processing,Hamburg,Germany"}]},{"given":"Danilo","family":"De Oliveira","sequence":"additional","affiliation":[{"name":"University of Hamburg,Signal Processing,Hamburg,Germany"}]},{"given":"Nale","family":"Lehmann-Willenbrock","sequence":"additional","affiliation":[{"name":"University of Hamburg,Industrial and Organizational Psychology,Hamburg,Germany"}]},{"given":"Timo","family":"Gerkmann","sequence":"additional","affiliation":[{"name":"University of Hamburg,Signal Processing,Hamburg,Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3129340"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"ref3","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Advances in Neural Inf. Proc. Systems (NeurIPS)","volume":"33","author":"Kong"},{"key":"ref4","article-title":"Gradtts: A diffusion probabilistic model for text-to-speech","volume-title":"Int. Conf. Machine Learning (ICML).","author":"Popov"},{"key":"ref5","article-title":"Difftts: A denoising diffusion model for text-to-speech","author":"Jeong","year":"2021","journal-title":"arXiv preprint arXiv:2104.01409"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2023.3250266"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10249"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2021.11.006"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2025.3530920"},{"key":"ref10","article-title":"In-thewild speech emotion conversion using disentangled self-supervised representations and neural vocoder-based resynthesis","volume-title":"Proc. ITG Conf. on Speech Comm.","author":"Raj Prabhu"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.769"},{"key":"ref12","article-title":"Auto-Encoding Variational Bayes","volume-title":"Int. Conf. on Learning Representations (ICLR)","author":"Kingma"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3422622"},{"key":"ref14","article-title":"Improved techniques for training score-based generative models","volume-title":"Advances in Neural Inf. Proc. Systems (NeurIPS)","author":"Song"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447372"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1256"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.3758\/s13428-023-02139-9"},{"key":"ref18","article-title":"Emotion modelling for speech generation","volume-title":"PhD thesis, National University of Singapore, 2022","author":"Zhou"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683865"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053255"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2022.3175578"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383526"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2736999"},{"key":"ref24","article-title":"Fastspeech: Fast, robust and controllable text to speech","volume-title":"Advances in Neural Inf. Proc. Systems (NeurIPS)","volume":"32","author":"Ren"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054119"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/icassp39728.2021.9415079"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49660.2025.10889084"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-475"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01802"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-1758"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.2307\/2532051"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3263585"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1037\/h0077714"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2023.3283595"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097255"},{"key":"ref40","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Advances in Neural Inf. Proc. Systems (NeurIPS)","volume":"33","author":"Baevski"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-28"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434687.pdf?arnumber=11434687","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:58:18Z","timestamp":1775192298000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434687\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434687","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}