{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T06:45:03Z","timestamp":1730270703218,"version":"3.28.0"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,8,29]],"date-time":"2024-08-29T00:00:00Z","timestamp":1724889600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,29]],"date-time":"2024-08-29T00:00:00Z","timestamp":1724889600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,8,29]]},"DOI":"10.1109\/is61756.2024.10705252","type":"proceedings-article","created":{"date-parts":[[2024,10,9]],"date-time":"2024-10-09T17:45:15Z","timestamp":1728495915000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["FastSpeech2 Based Japanese Emotional Speech Synthesis"],"prefix":"10.1109","author":[{"given":"Masaki","family":"Ikeda","sequence":"first","affiliation":[{"name":"University of Aizu,Information Systems Division,Aizuwakamatsu,Japan"}]},{"given":"Konstantin","family":"Markov","sequence":"additional","affiliation":[{"name":"University of Aizu,Information Systems Division,Aizuwakamatsu,Japan"}]}],"member":"263","reference":[{"volume-title":"Free your music production","key":"ref1"},{"volume-title":"Reading sentences containing Kanji and English in various voices","key":"ref2"},{"volume-title":"Free, medium-quality text-to-speech and singing voice synthesis software","author":"Vox","key":"ref3"},{"key":"ref4","first-page":"8067","article-title":"Glow-tts: A generative flow for text-to-speech via monotonic alignment search","volume":"33","author":"Kim","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref5","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"International Conference on Machine Learning","author":"Kim"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/TPAMI.2024.3356232"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1109\/ICASSP40776.2020.9054148"},{"key":"ref8","article-title":"Fast speech 2: Fast and high-quality end-to-end text to speech","author":"Ren","year":"2020","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.3390\/app9194050"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1093\/ietisy\/e90-d.9.1406"},{"key":"ref11","article-title":"Emotional end-to-end neural speech synthesizer","author":"Lee","year":"2017","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref13","first-page":"5180","article-title":"Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis","volume-title":"International conference on machine learning","author":"Wang"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1109\/ICASSP39728.2021.9413907"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1109\/ICASSP40776.2020.9053732"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/ICASSP49357.2023.10097118"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1109\/TASLP.2024.3402088"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/APSIPA.2014.7041623"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1016\/j.specom.2020.11.004"},{"volume-title":"Expressive-fastspeech2","year":"2021","author":"Lee","key":"ref20"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.21437\/Interspeech.2021-971"},{"key":"ref22","article-title":"Adaspeech: Adaptive text to speech for custom voice","author":"Chen","year":"2021","journal-title":"arXiv preprint"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.21437\/SSW.2023-17"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.21437\/Interspeech.2020-3139"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1109\/ICASSP40776.2020.9054535"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.21437\/Interspeech.2021-1148"},{"key":"ref27","article-title":"Jsut corpus: free large-scale japanese speech corpus for end-to-end speech synthesis","volume-title":"arXiv preprint","author":"Sonobe","year":"2017"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.21437\/Interspeech.2022-300"},{"volume-title":"Fastspeech2 JSUT implementation","author":"Nakata","key":"ref29"}],"event":{"name":"2024 IEEE 12th International Conference on Intelligent Systems (IS)","start":{"date-parts":[[2024,8,29]]},"location":"Varna, Bulgaria","end":{"date-parts":[[2024,8,31]]}},"container-title":["2024 IEEE 12th International Conference on Intelligent Systems (IS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10705133\/10705164\/10705252.pdf?arnumber=10705252","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,10]],"date-time":"2024-10-10T11:28:59Z","timestamp":1728559739000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10705252\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,29]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/is61756.2024.10705252","relation":{},"subject":[],"published":{"date-parts":[[2024,8,29]]}}}