{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T14:47:14Z","timestamp":1774968434620,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10445740","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"10636-10640","source":"Crossref","is-referenced-by-count":12,"title":["Towards Improving Speech Emotion Recognition Using Synthetic Data Augmentation from Emotion Conversion"],"prefix":"10.1109","author":[{"given":"Karim M.","family":"Ibrahim","sequence":"first","affiliation":[{"name":"Emobot,France"}]},{"given":"Antony","family":"Perzo","sequence":"additional","affiliation":[{"name":"Emobot,France"}]},{"given":"Simon","family":"Leglaive","sequence":"additional","affiliation":[{"name":"CentraleSupélec,IETR (UMR CNRS 6164),France"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3068045"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3114365"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-75178-4"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2561"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10667"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.apacoust.2023.109425"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.3390\/app9194050"},{"key":"ref8","article-title":"Fastspeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proceedings of the International Conference on Learning Representations","author":"Ren"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1080\/01690961003589492"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.769"},{"key":"ref11","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"Baevski","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1883"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.5555\/2969033.2969125"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2293"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2898"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054579"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00916"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1587\/transinf.2015EDP7457"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1253"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref22","article-title":"Hifi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","author":"Kong","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref24","article-title":"On generative spoken language modeling from raw audio","author":"Lakhotia","year":"2021","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2002.5743729"},{"key":"ref26","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-703"},{"key":"ref28","article-title":"A fine-tuned wav2vec 2.0\/hubert benchmark for speech emotion recognition, speaker verification and spoken language understanding","author":"Wang","year":"2021"},{"key":"ref29","article-title":"The emotional voices database: Towards controlling the emotion dimension in voice generation systems","author":"Adigwe","year":"2018"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.5281\/zenodo.1188976"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2017.02.013"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1088\/1742-6596\/1453\/1\/012085"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10445740.pdf?arnumber=10445740","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:43:17Z","timestamp":1722573797000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10445740\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10445740","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}