{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:28:28Z","timestamp":1775230108270,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10095621","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T13:28:30Z","timestamp":1683293310000},"page":"1-5","source":"Crossref","is-referenced-by-count":29,"title":["Emodiff: Intensity Controllable Emotional Text-to-Speech with Soft-Label Guidance"],"prefix":"10.1109","author":[{"given":"Yiwei","family":"Guo","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute X-LANCE Lab,Department of Computer Science and Engineering,Shanghai,China"}]},{"given":"Chenpeng","family":"Du","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute X-LANCE Lab,Department of Computer Science and Engineering,Shanghai,China"}]},{"given":"Xie","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute X-LANCE Lab,Department of Computer Science and Engineering,Shanghai,China"}]},{"given":"Kai","family":"Yu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute X-LANCE Lab,Department of Computer Science and Engineering,Shanghai,China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3145293"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/SSW.2021-11"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053732"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2022.3233324"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413391"},{"key":"ref30","first-page":"11 119","article-title":"Guided-tts: A diffusion model for text-to-speech via classifier guidance","author":"kim","year":"2022","journal-title":"Proc ICML"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383524"},{"key":"ref33","first-page":"17 022","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"kong","year":"2020","journal-title":"Proc NeurIPS"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003829"},{"key":"ref32","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"IEEE 2011 workshop on automatic speech recognition and understanding no CONF IEEE Signal Processing Society"},{"key":"ref2","first-page":"5530","article-title":"Conditional variational autoen-coder with adversarial learning for end-to-end text-to-speech","volume":"139","author":"kim","year":"2021","journal-title":"Proc 
ICML"},{"key":"ref1","first-page":"8599","article-title":"Grad-tts: A diffusion probabilistic model for text-to-speech","author":"popov","year":"2021","journal-title":"Proc ICML"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3065460"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747098"},{"key":"ref19","first-page":"8780","article-title":"Diffusion models beat gans on image synthesis","volume":"34","author":"dhariwal","year":"2021","journal-title":"Proc NeurIPS"},{"key":"ref18","article-title":"Emotion intensity and its control for emotional voice conversion","author":"zhou","year":"2022","journal-title":"IEEE Transactions on Affective Computing"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-469"},{"key":"ref23","article-title":"Generative modeling by estimating gradients of the data distribution","volume":"32","author":"song","year":"2019","journal-title":"Proc NeurIPS"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/577"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21350"},{"key":"ref20","article-title":"More control for free! image synthesis with semantic diffusion guidance","author":"liu","year":"2021"},{"key":"ref22","article-title":"Score-based generative modeling through stochastic differential equations","author":"song","year":"2021","journal-title":"Proc ICLR"},{"key":"ref21","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume":"33","author":"ho","year":"2020","journal-title":"Proc NeurIPS"},{"key":"ref28","article-title":"Wavegrad: Estimating gradients for waveform generation","author":"chen","year":"2021","journal-title":"Proc ICLR"},{"key":"ref27","article-title":"BDDM: Bilateral denoising diffusion models for fast and high-quality speech synthesis","author":"lam","year":"2022","journal-title":"Proc ICLR"},{"key":"ref29","article-title":"Dif-fwave: A versatile diffusion model for audio synthesis","author":"kong","year":"2021","journal-title":"Proc ICLR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-465"},{"key":"ref7","article-title":"Emotional end-to-end neural speech synthesizer","author":"lee","year":"2017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126281"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053520"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-489"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746323"},{"key":"ref5","first-page":"190","article-title":"Phone-level prosody modelling with GMM-based MDN for diverse and controllable speech synthesis","volume":"30","author":"du","year":"2021","journal-title":"IEEE\/ACM Trans ASLP"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Rhodes Island, Greece","start":{"date-parts":[[2023,6,4]]},"end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing 
(ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10095621.pdf?arnumber=10095621","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T04:26:36Z","timestamp":1769487996000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10095621\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10095621","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}