{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T21:00:08Z","timestamp":1757624408658,"version":"3.44.0"},"publisher-location":"Cham","reference-count":31,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032045485","type":"print"},{"value":"9783032045492","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T00:00:00Z","timestamp":1757462400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T00:00:00Z","timestamp":1757462400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04549-2_15","type":"book-chapter","created":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T13:48:36Z","timestamp":1757425716000},"page":"180-191","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Emotional Text-to-Speech via\u00a0Style Decoder with\u00a0Emotion Shared Styleformer Block and\u00a0RoPE Prior Encoder"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1014-9565","authenticated-orcid":false,"given":"Wenhan","family":"Yao","sequence":"first","affiliation":[]},{"given":"Fen","family":"Xiao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-3611-7294","authenticated-orcid":false,"given":"Ye","family":"Xiao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9231-8660","authenticated-orcid":false,"given":"Zexin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xiarun","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Weiping","family":"Wen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,10]]},"reference":[{"key":"15_CR1","doi-asserted-by":"crossref","unstructured":"Shen, J., et al.: Natural TTS synthesis by conditioning WaveNet on mel spectrogram predictions. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4779\u20134783. IEEE (2018)","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"15_CR2","unstructured":"Ren, Y., et al.: Fastspeech: fast, robust and controllable text to speech. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"15_CR3","unstructured":"Ren, Y., et al.: Fastspeech 2: fast and high-quality end-to-end text to speech. arXiv preprint arXiv:2006.04558 (2020)"},{"key":"15_CR4","doi-asserted-by":"crossref","unstructured":"Li, N., Liu, S., Liu, Y., Zhao, S., Liu, M.: Neural speech synthesis with transformer network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, no. 01, pp. 6706\u20136713 (2019)","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"15_CR5","unstructured":"Kim, J.,\u00a0Kong, J.,\u00a0Son, J.: Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech. In: International Conference on Machine Learning, pp. 5530\u20135540. PMLR (2021)"},{"key":"15_CR6","unstructured":"Wang, C., et al.: Neural codec language models are zero-shot text to speech synthesizers. 
arXiv preprint arXiv:2301.02111 (2023)"},{"key":"15_CR7","unstructured":"Chan, C.H.,\u00a0Qian, K.,\u00a0Zhang, Y.,\u00a0Hasegawa-Johnson, M.: Speechsplit2. 0: unsupervised speech disentanglement for voice conversion without tuning autoencoder bottlenecks. In: ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6332\u20136336. IEEE (2022)"},{"key":"15_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.specom.2021.11.006","volume":"137","author":"K Zhou","year":"2022","unstructured":"Zhou, K., Sisman, B., Liu, R., Li, H.: Emotional voice conversion: theory, databases and ESD. Speech Commun. 137, 1\u201318 (2022)","journal-title":"Speech Commun."},{"issue":"9","key":"15_CR9","doi-asserted-by":"publisher","first-page":"1406","DOI":"10.1093\/ietisy\/e90-d.9.1406","volume":"90","author":"T Nose","year":"2007","unstructured":"Nose, T., Yamagishi, J., Masuko, T., Kobayashi, T.: A style control technique for hmm-based expressive speech synthesis. IEICE Trans. Inf. Syst. 90(9), 1406\u20131413 (2007)","journal-title":"IEICE Trans. Inf. Syst."},{"key":"15_CR10","doi-asserted-by":"crossref","unstructured":"Toma, \u015e.-A., T\u00e2r\u015fa, G.-I.,\u00a0Oancea, E., Munteanu, D.-P.,\u00a0Totir, F.,\u00a0Anton, L.: A td-psola based method for speech synthesis and compression. In: 2010 8th International Conference on Communications, pp. 123\u2013126. IEEE (2010)","DOI":"10.1109\/ICCOMM.2010.5509044"},{"key":"15_CR11","doi-asserted-by":"crossref","unstructured":"Bott, T.,\u00a0Lux, F., Vu, N.T.: Controlling emotion in text-to-speech with natural language prompts. arXiv preprint arXiv:2406.06406 (2024)","DOI":"10.21437\/Interspeech.2024-1337"},{"key":"15_CR12","doi-asserted-by":"crossref","unstructured":"Im, C.-B., Lee, S.-H., Kim, S.-B., Lee, S.-W.: Emoq-TTS: emotion intensity quantization for fine-grained controllable emotional text-to-speech. In: ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6317\u20136321. IEEE, 2022","DOI":"10.1109\/ICASSP43922.2022.9747098"},{"key":"15_CR13","doi-asserted-by":"crossref","unstructured":"Diatlova, D.,\u00a0Shutov, V.: Emospeech: guiding fastspeech2 towards emotional text to speech. arXiv preprint arXiv:2307.00024 (2023)","DOI":"10.21437\/SSW.2023-17"},{"key":"15_CR14","doi-asserted-by":"crossref","unstructured":"Cui, C., et al.: Emovie: a mandarin emotion speech dataset with a simple emotional text-to-speech model. arXiv preprint arXiv:2106.09317 (2021)","DOI":"10.21437\/Interspeech.2021-1148"},{"key":"15_CR15","doi-asserted-by":"publisher","first-page":"1506","DOI":"10.1109\/TASLP.2024.3363444","volume":"32","author":"X Zhu","year":"2024","unstructured":"Zhu, X., et al.: Metts: multilingual emotional text-to-speech by cross-speaker and cross-lingual emotion transfer. IEEE\/ACM Trans. Audio Speech Lang. Process. 32, 1506\u20131518 (2024)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"15_CR16","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: Umetts: a unified framework for emotional text-to-speech synthesis with multimodal prompts. In: ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1\u20135. IEEE (2025)","DOI":"10.1109\/ICASSP49660.2025.10889012"},{"key":"15_CR17","doi-asserted-by":"crossref","unstructured":"Liu, R.,\u00a0Sisman, B.,\u00a0Li, H.: Reinforcement learning for emotional text-to-speech synthesis with improved emotion discriminability. 
arXiv preprint arXiv:2104.01408 (2021)","DOI":"10.21437\/Interspeech.2021-1236"},{"key":"15_CR18","doi-asserted-by":"publisher","first-page":"853","DOI":"10.1109\/TASLP.2022.3145293","volume":"30","author":"Y Lei","year":"2022","unstructured":"Lei, Y., Yang, S., Wang, X., Xie, L.: Msemotts: multi-scale emotion transfer, prediction, and control for emotional speech synthesis. IEEE\/ACM Trans. Audio Speech Lang. Process. 30, 853\u2013864 (2022)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"15_CR19","doi-asserted-by":"crossref","unstructured":"Alemayehu, Y., Yadav, R.K., Mohammed, A.R.,\u00a0Thapa, S.,\u00a0Chauhan, S.: Infusing emotion in text to speech model. In 2024 4th International Conference on Technological Advancements in Computational Sciences (ICTACS), pp. 712\u2013716. IEEE (2024)","DOI":"10.1109\/ICTACS62700.2024.10841022"},{"key":"15_CR20","doi-asserted-by":"crossref","unstructured":"Park, J.,\u00a0Kim, Y.: Styleformer: transformer based generative adversarial networks with style vector. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8983\u20138992 (2022)","DOI":"10.1109\/CVPR52688.2022.00878"},{"key":"15_CR21","unstructured":"Rezende, D.,\u00a0Mohamed, S.: Variational inference with normalizing flows. In: International Conference on Machine Learning, pp. 1530\u20131538. PMLR (2015)"},{"key":"15_CR22","unstructured":"Kim, J., Kim, S., Kong, J., Yoon, S.: Glow-TTS: a generative flow for text-to-speech via monotonic alignment search. In: Advances in Neural Information Processing Systems, vol. 33, pp. 8067\u20138077 (2020)"},{"key":"15_CR23","doi-asserted-by":"publisher","first-page":"127063","DOI":"10.1016\/j.neucom.2023.127063","volume":"568","author":"J Su","year":"2024","unstructured":"Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., Liu, Y.: Roformer: enhanced transformer with rotary position embedding. Neurocomputing 568, 127063 (2024)","journal-title":"Neurocomputing"},{"key":"15_CR24","doi-asserted-by":"crossref","unstructured":"Wang, H.,\u00a0Zheng, S.,\u00a0Chen, Y.,\u00a0Cheng, L.,\u00a0Chen, Q.: Cam++: a fast and efficient network for speaker verification using context-aware masking. arXiv preprint arXiv:2303.00332 (2023)","DOI":"10.21437\/Interspeech.2023-1513"},{"key":"15_CR25","unstructured":"Salimans, T., Kingma, D.P.: Weight normalization: a simple reparameterization to accelerate training of deep neural networks. In: Advances in Neural Information Processing Systems, vol.\u00a029 (2016)"},{"key":"15_CR26","unstructured":"Adigwe, A.,\u00a0Tits, N., Haddad, K.E.,\u00a0Ostadabbas, S.,\u00a0Dutoit, T.: The emotional voices database: Towards controlling the emotion dimension in voice generation systems. arXiv preprint arXiv:1806.09514 (2018)"},{"key":"15_CR27","unstructured":"Wang, Y., et al.: Style tokens: unsupervised style modeling, control and transfer in end-to-end speech synthesis. In: International Conference on Machine Learning, pp. 5180\u20135189. PMLR (2018)"},{"key":"15_CR28","doi-asserted-by":"crossref","unstructured":"Kubichek, R.: Mel-cepstral distance measure for objective speech quality assessment. In: Proceedings of IEEE Pacific Rim Conference on Communications Computers and Signal Processing, vol.\u00a01, pp. 125\u2013128. 
IEEE (1993)","DOI":"10.1109\/PACRIM.1993.407206"},{"issue":"3","key":"15_CR29","doi-asserted-by":"publisher","first-page":"1247","DOI":"10.5194\/gmd-7-1247-2014","volume":"7","author":"T Chai","year":"2014","unstructured":"Chai, T., Draxler, R.R.: Root mean square error (RMSE) or mean absolute error (MAE)?-arguments against avoiding RMSE in the literature. Geoscientific Model Dev. 7(3), 1247\u20131250 (2014)","journal-title":"Geoscientific Model Dev."},{"key":"15_CR30","doi-asserted-by":"crossref","unstructured":"Mittag, G.,\u00a0Naderi, B.,\u00a0Chehadi, A., M\u00f6ller, S.: Nisqa: a deep CNN-self-attention model for multidimensional speech quality prediction with crowdsourced datasets (2021)","DOI":"10.21437\/Interspeech.2021-299"},{"key":"15_CR31","unstructured":"Radford, A., Kim, J.W.,\u00a0Xu, T.,\u00a0Brockman, G.,\u00a0McLeavey, C.,\u00a0Sutskever, I.: Robust speech recognition via large-scale weak supervision. In: International Conference on Machine Learning, pp. 492\u2013518. PMLR (2023)"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04549-2_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T13:48:48Z","timestamp":1757425728000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04549-2_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,10]]},"ISBN":["9783032045485","9783032045492"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04549-2_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,10]]},"assertion":[{"value":"10 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kaunas","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lithuania","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"34","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"https:\/\/e-nns.org\/icann2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}