{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T16:51:08Z","timestamp":1774716668048,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001824","name":"Grantov\u00e1 Agentura \u010cesk\u00e9 Republiky","doi-asserted-by":"publisher","award":["GA22-27800S"],"award-info":[{"award-number":["GA22-27800S"]}],"id":[{"id":"10.13039\/501100001824","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100009056","name":"Z\u00e1pado\u010desk\u00e1 Univerzita v Plzni","doi-asserted-by":"publisher","award":["SGS-2022-017"],"award-info":[{"award-number":["SGS-2022-017"]}],"id":[{"id":"10.13039\/100009056","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100010449","name":"Ministry of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100010449","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Youth and Sports of the Czech Republic"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. 
Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3426332","type":"journal-article","created":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T17:38:28Z","timestamp":1720633108000},"page":"3466-3476","source":"Crossref","is-referenced-by-count":8,"title":["T5G2P: Text-to-Text Transfer Transformer Based Grapheme-to-Phoneme Conversion"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6194-7826","authenticated-orcid":false,"given":"Mark\u00e9ta","family":"\u0158ez\u00e1\u010dkov\u00e1","sequence":"first","affiliation":[{"name":"Department of Cybernetics, Faculty of Applied Sciences, University of West Bohemia, Pilsen, Czech Republic"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3149-2330","authenticated-orcid":false,"given":"Daniel","family":"Tihelka","sequence":"additional","affiliation":[{"name":"New Technologies for the Information Society, Faculty of Applied Sciences, University of West Bohemia, Pilsen, Czech Republic"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7408-7730","authenticated-orcid":false,"given":"Jind\u0159ich","family":"Matou\u0161ek","sequence":"additional","affiliation":[{"name":"Department of Cybernetics, Faculty of Applied Sciences, University of West Bohemia, Pilsen, Czech Republic"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2017-1452"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref3","article-title":"FastSpeech: Fast, robust and controllable text to speech","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Ren","year":"2019"},{"key":"ref4","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Ren","year":"2021"},{"key":"ref5","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2021"},{"key":"ref6","article-title":"Char2wav: End-to-end speech synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sotelo","year":"2017"},{"key":"ref7","article-title":"ClariNet: Parallel wave generation in end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ping","year":"2019"},{"key":"ref8","article-title":"End-to-end adversarial text-to-speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Donahue","year":"2021"},{"key":"ref9","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2830"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178767"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2015-134"},{"key":"ref13","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"30","author":"Vaswani","year":"2017"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1954"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15760-8_72"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-industry.32"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-2336"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-546"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2335"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-27947-9_8"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1406.1078"},{"key":"ref22","article-title":"Google's neural machine translation system: Bridging the gap between human and machine translation","author":"Wu","year":"2016"},{"key":"ref23","article-title":"Very deep transformers for neural machine translation","author":"Liu","year":"2020"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00794-2_40"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30120-2_68"},{"key":"ref26","volume-title":"IPA-Based Transcription for Czech Students of English","author":"Vol\u00edn","year":"2002"},{"key":"ref27","first-page":"253","article-title":"Fonologick\u00e1 v\u00fdjime\u010dnost \u010desk\u00e9 zn\u011bl\u00e9 labiodent\u00e1ly","volume-title":"Kapitoly z Fonetiky a Fonologie Slovansk\u00fdch Jazyk\u016f","author":"Vol\u00edn","year":"2006"},{"key":"ref28","article-title":"SAMPA computer readable phonetic alphabet","volume-title":"Handbook of Standards and Resources for Spoken Language Systems","author":"Wells","year":"1997"},{"key":"ref29","volume-title":"Modern Russian 
Stress","author":"Avanesov","year":"1964"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1515\/9783110869453"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1017\/S0025100309990284"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-27947-9_26"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/78.650093"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"issue":"1","key":"ref35","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-538"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-89579-2_3"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-013-9246-z"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-349-17519-2_12"},{"key":"ref41","first-page":"179","article-title":"Categorizing and tagging words","volume-title":"Natural Language Processing with Python","author":"Bird","year":"2009"},{"key":"ref42","article-title":"Librispeech alignments","author":"Lugosch","year":"2019"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-11066"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-45323-7_38"},{"key":"ref45","article-title":"mT5: A massively multilingual pre-trained text-to-text transformer","volume-title":"Proc. North Amer. Ch. Assoc. Comput. Linguistics","author":"Xue","year":"2020"},{"key":"ref46","first-page":"336","article-title":"T5G2P: Multilingual grapheme-to-phoneme conversion with text-to-text transfer transformer","volume-title":"Proc. Asian Conf. 
Pattern Recognit.","author":"\u0158ez\u00e1\u010dkov\u00e1","year":"2023"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6570655\/10304349\/10592637.pdf?arnumber=10592637","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,30]],"date-time":"2024-07-30T17:59:02Z","timestamp":1722362342000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10592637\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3426332","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}