{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:26:45Z","timestamp":1775230005134,"version":"3.50.1"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747422","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T15:50:34Z","timestamp":1651074634000},"page":"7242-7246","source":"Crossref","is-referenced-by-count":5,"title":["A Melody-Unsupervision Model for Singing Voice Synthesis"],"prefix":"10.1109","author":[{"given":"Soonbeom","family":"Choi","sequence":"first","affiliation":[{"name":"KAIST,Graduate School of Culture Technology,Daejeon,South Korea"}]},{"given":"Juhan","family":"Nam","sequence":"additional","affiliation":[{"name":"KAIST,Graduate School of Culture Technology,Daejeon,South Korea"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1575"},{"key":"ref11","article-title":"Singing voice synthesis based on convolutional neural networks","author":"nakamura","year":"2019"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1722"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053950"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403249"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053944"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1410"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-239"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1789"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref28","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461829"},{"key":"ref27","author":"park","year":"2019","journal-title":"g2pE A simple python module for english grapheme to phoneme conversion"},{"key":"ref3","article-title":"Deep voice 3: Scaling text-to-speech with convolutional sequence learning","author":"ping","year":"2018","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"ref5","article-title":"Wavenet: A generative model for raw audio","author":"oord","year":"2016"},{"key":"ref8","first-page":"17022","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"kong","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053795"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.3390\/app7121313"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"4006","DOI":"10.21437\/Interspeech.2017-1452","article-title":"Tacotron: Towards end-to-end speech synthesis","author":"wang","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref20","article-title":"Improving transformer-based end-to-end speech recognition with connectionist temporal classification and language model integration","author":"nakatani","year":"2019","journal-title":"Proc INTERSPEECH"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682539"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-68"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054636"},{"key":"ref26","author":"cho","year":"2017","journal-title":"Korean grapheme-to-phoneme analyzer (kog2p)"},{"key":"ref25","article-title":"Children’s song dataset for singing voice research","author":"choi","year":"2020","journal-title":"Late Breaking Demo in the 21st International Society for Music Information Retrieval conference"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747422.pdf?arnumber=9747422","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T16:10:36Z","timestamp":1661184636000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747422\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747422","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}