{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T17:24:16Z","timestamp":1771262656930,"version":"3.50.1"},"reference-count":25,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Institute of Information &amp; Communications Technology Planning &amp; Evaluation"},{"name":"Korea Government","award":["2020-0-00059"],"award-info":[{"award-number":["2020-0-00059"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Signal Process. Lett."],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/lsp.2020.3036349","type":"journal-article","created":{"date-parts":[[2020,11,6]],"date-time":"2020-11-06T20:42:27Z","timestamp":1604695347000},"page":"2004-2008","source":"Crossref","is-referenced-by-count":4,"title":["Memory Attention: Robust Alignment Using Gating Mechanism for End-to-End Speech Synthesis"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3316-4808","authenticated-orcid":false,"given":"Joun Yeop","family":"Lee","sequence":"first","affiliation":[]},{"given":"Sung Jun","family":"Cheon","sequence":"additional","affiliation":[]},{"given":"Byoung Jin","family":"Choi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0568-4902","authenticated-orcid":false,"given":"Nam Soo","family":"Kim","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054106"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1972"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003956"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054119"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-24797-2_2"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461829"},{"key":"ref18","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-49127-9_5"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"ref6","first-page":"195","article-title":"Deep voice: Real-time neural text-to-speech","author":"arik","year":"0","journal-title":"Proc 34th Int Conf Mach Learn"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref8","first-page":"1","article-title":"Deep voice 3: Scaling text-to-speech with convolutional sequence learning","author":"ping","year":"0","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref7","first-page":"2962","article-title":"Deep voice 2: Multi-speaker neural text-to-speech","author":"gibiansky","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref2","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref9","first-page":"3165","article-title":"Fastspeech: Fast, robust and controllable text to speech","author":"ren","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1336"},{"key":"ref20","article-title":"The LJ speech dataset","author":"ito","year":"2017"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1984.1164317"},{"key":"ref21","article-title":"WaveNet: A generative model for raw audio","author":"van den oord","year":"2016"},{"key":"ref24","first-page":"5206","article-title":"Forward attention in sequence-to-sequence acoustic modeling for speech synthesis","author":"panayotov","year":"0","journal-title":"Proc IEEE Int Conf Acoust Speech Signal Process"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1616"},{"key":"ref25","first-page":"933","article-title":"Language modeling with gated convolutional networks","author":"dauphin","year":"0","journal-title":"Proc 34th Int Conf Mach Learn"}],"container-title":["IEEE Signal Processing Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/97\/8966529\/09250512.pdf?arnumber=9250512","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T14:46:32Z","timestamp":1651070792000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9250512\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/lsp.2020.3036349","relation":{},"ISSN":["1070-9908","1558-2361"],"issn-type":[{"value":"1070-9908","type":"print"},{"value":"1558-2361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}