{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T05:28:15Z","timestamp":1755926895333,"version":"3.32.0"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,11,7]]},"DOI":"10.1109\/iscslp63861.2024.10800708","type":"proceedings-article","created":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:11:17Z","timestamp":1734981077000},"page":"501-505","source":"Crossref","is-referenced-by-count":1,"title":["Lightweight Language Model for Speech Synthesis: Attempts and Analysis"],"prefix":"10.1109","author":[{"given":"Zhuojun","family":"Wu","sequence":"first","affiliation":[{"name":"School of Computer Science, Wuhan University,Wuhan"}]},{"given":"Dong","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Wuhan University,Wuhan"}]},{"given":"Ming","family":"Li","sequence":"additional","affiliation":[{"name":"School of Computer Science, Wuhan University,Wuhan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref2","article-title":"Fastspeech: Fast, robust and controllable text to speech","volume":"32","author":"Ren","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"ref4","first-page":"8067","article-title":"Glow-tts: A generative flow for 
text-to-speech via monotonic alignment search","volume":"33","author":"Kim","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref5","article-title":"Flowtron: an autoregressive flow-based generative network for text-to-speech synthesis","volume-title":"arXiv preprint","author":"Valle","year":"2020"},{"key":"ref6","first-page":"8599","article-title":"Grad-tts: A diffusion probabilistic model for text-to-speech","volume-title":"International Conference on Machine Learning","author":"Popov","year":"2021"},{"key":"ref7","article-title":"Fastspeech 2: Fast and high-quality end-to-end text to speech","author":"Ren","year":"2020","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Wavenet: A generative model for raw audio","author":"Oord","year":"2016","journal-title":"arXiv preprint"},{"key":"ref9","article-title":"Mel-gan: Generative adversarial networks for conditional waveform synthesis","volume-title":"Advances in neural information processing systems","volume":"32","author":"Kumar","year":"2019"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"article-title":"High fidelity speech synthesis with adversarial networks","volume-title":"arXiv preprint","author":"Bi\u0144kowski","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053795"},{"key":"ref13","first-page":"17022","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Advances in neural information processing systems","volume":"33","author":"Kong","year":"2020"},{"key":"ref14","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"International Conference on Machine 
Learning","author":"Kim","year":"2021"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-534"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26488"},{"key":"ref17","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"Mega-TTS 2: Boosting prompting mechanisms for zero-shot speech synthesis","volume-title":"The Twelfth International Conference on Learning Representations","author":"Jiang","year":"2024"},{"key":"ref19","article-title":"High fidelity neural audio compression","author":"D\u00e9fossez","year":"2022","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Language models are few-shot learners","volume":"1","author":"Mann","year":"2020","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.507"},{"key":"ref23","article-title":"Lauragpt: Listen, attend, understand, and regenerate audio with gpt","author":"Chen","year":"2023","journal-title":"arXiv preprint"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447523"},{"key":"ref25","article-title":"Auto-encoding variational bayes","author":"Kingma","year":"2013","journal-title":"arXiv preprint"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"volume-title":"Qwen technical report","year":"2023","author":"Bai","key":"ref27"},{"key":"ref28","article-title":"Accurate lora-finetuning quantization of llms via information retention","author":"Qin","year":"2024","journal-title":"arXiv 
preprint"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-2343"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414878"},{"key":"ref31","first-page":"38087","article-title":"Smoothquant: Accurate and efficient post-training quantization for large language models","volume-title":"International Conference on Machine Learning","author":"Xiao","year":"2023"},{"key":"ref32","article-title":"Base tts: Lessons from building a billion-parameter text-to-speech model on 100k hours of data","author":"Lajszczak","year":"2024","journal-title":"arXiv preprint"}],"event":{"name":"2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)","start":{"date-parts":[[2024,11,7]]},"location":"Beijing, China","end":{"date-parts":[[2024,11,10]]}},"container-title":["2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10799944\/10799969\/10800708.pdf?arnumber=10800708","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,24]],"date-time":"2024-12-24T06:26:42Z","timestamp":1735021602000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10800708\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,7]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/iscslp63861.2024.10800708","relation":{},"subject":[],"published":{"date-parts":[[2024,11,7]]}}}