{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,25]],"date-time":"2026-04-25T14:55:18Z","timestamp":1777128918840,"version":"3.51.4"},"reference-count":80,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376237"],"award-info":[{"award-number":["62376237"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shenzhen Science and Technology Program","award":["ZDSYS20230626091302006"],"award-info":[{"award-number":["ZDSYS20230626091302006"]}]},{"name":"Shenzhen Science and Technology Research Fund","award":["JCYJ20220818103001002"],"award-info":[{"award-number":["JCYJ20220818103001002"]}]},{"name":"Science and Technology Development Fund, Macao SAR","award":["0006\/2024\/RIA1"],"award-info":[{"award-number":["0006\/2024\/RIA1"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3468005","type":"journal-article","created":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T19:29:56Z","timestamp":1727292596000},"page":"4569-4579","source":"Crossref","is-referenced-by-count":2,"title":["An Investigation of Time-Frequency Representation Discriminators for High-Fidelity Vocoders"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-7819-5667","authenticated-orcid":false,"given":"Yicheng","family":"Gu","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Guangzhou, China"}]},{"given":"Xueyao","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Guangzhou, China"}]},{"given":"Liumeng","family":"Xue","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9158-9401","authenticated-orcid":false,"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-1192-9857","authenticated-orcid":false,"given":"Zhizheng","family":"Wu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Shenzhen, Guangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21350"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106762"},{"key":"ref3","first-page":"1","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","author":"Ren","year":"2021"},{"key":"ref4","first-page":"1","article-title":"NaturalSpeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers","volume-title":"Proc. 12th Int. Conf. Learn Representations","author":"Shen","year":"2024"},{"key":"ref5","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2021"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10294"},{"key":"ref7","article-title":"High fidelity neural audio compression","volume-title":"Trans. Mach. Learn. Res.","volume":"2023","author":"Dfossez","year":"2023"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10447523"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1250\/ast.27.349"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1587\/transinf.2015EDP7457"},{"key":"ref11","first-page":"125","article-title":"WaveNet: A generative model for raw audio","volume-title":"Proc. 9th ISCA Speech Synth. Workshop","author":"Oord","year":"2016"},{"key":"ref12","first-page":"2415","article-title":"Efficient neural audio synthesis","author":"Kalchbrenner","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3440956"},{"key":"ref14","first-page":"3918","article-title":"Parallel wavenet: Fast high-fidelity speech synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Oord","year":"2018"},{"key":"ref15","first-page":"1","article-title":"ClariNet: Parallel wave generation in end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ping","year":"2019"},{"key":"ref16","first-page":"7706","article-title":"WaveFlow: A compact flow-based model for raw audio","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"119","author":"Ping","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"issue":"6","key":"ref18","doi-asserted-by":"crossref","first-page":"1019","DOI":"10.1109\/TASLP.2019.2906484","volume":"27","author":"Juvela","year":"2019","journal-title":"IEEE\/ACM Trans. Audio, Speech, Lang. Process."},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682804"},{"key":"ref20","first-page":"1","article-title":"DiffWave: A versatile diffusion model for audio synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kong","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10447251"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682298"},{"key":"ref23","first-page":"667","article-title":"Singing voice synthesis using differentiable LPC and glottal-flow-inspired wavetables","volume-title":"Proc. 24th Int. Soc. Music Inf. Retrieval Conf.","author":"Yu","year":"2023"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053795"},{"key":"ref25","first-page":"14881","article-title":"MelGAN: Generative adversarial networks for conditional waveform synthesis","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Kumar","year":"2019"},{"key":"ref26","article-title":"Universal MelGAN: A robust neural vocoder for high-fidelity waveform generation in multiple domains","author":"Jang"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2143"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-845"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547854"},{"key":"ref30","first-page":"1","article-title":"BigVGAN: A universal neural vocoder with large-scale training","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lee","year":"2023"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10446121"},{"key":"ref32","first-page":"27980","article-title":"High-fidelity audio compression with improved RVQGAN","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Kumar","year":"2023"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3118033"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-517"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096288"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446960"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-255"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448502"},{"key":"ref39","first-page":"80","article-title":"HiFi-WaveGAN: Generative adversarial network with auxiliary spectrogram-phase loss for high-fidelity singing voice generation","volume-title":"Proc. 18th Int. Symp. Neural Netw.","author":"Chunhui","year":"2024"},{"key":"ref40","article-title":"FreeV: Free lunch for vocoders through pseudo inversed mel filter","author":"Yuanjun","year":"2024"},{"key":"ref41","first-page":"76","article-title":"DDSP-based singing vocoders: A new subtractive-based synthesizer and a comprehensive evaluation","volume-title":"Proc. 23rd Int. Soc. Music Inf. Retrieval Conf.","author":"Wu","year":"2022"},{"key":"ref42","first-page":"1583","article-title":"Neural networks fail to learn periodic functions and how to fix it","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu","year":"2020"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICME55011.2023.00293"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-41"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1977.1162950"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2017.01.001"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1121\/1.404385"},{"key":"ref48","first-page":"3","article-title":"Constant-q transform toolbox for music processing","volume-title":"Proc. 7th Sound Music Comput. Conf.","author":"Schrkhuber","year":"2010"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/18.119724"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448436"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/BF00330404"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.1984.1172757"},{"key":"ref53","article-title":"Analysis and synthesis of music using the auditory transform","author":"Stautner","year":"1983"},{"key":"ref54","first-page":"1","article-title":"Joint time\/frequency analysis, Q quality factor and dispersion computation using gabor-morlet wavelets or the Gabor-Morlet transform","volume":"1983","author":"Taner","year":"1983","journal-title":"Rock Solid Images"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/BF01937176"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/BF02649110"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.25080\/Majora-7b98e3ed-003"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3019084"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.21105\/joss.01237"},{"key":"ref60","first-page":"6914","article-title":"M4Singer: A multi-style, multi-singer and musical score provided mandarin singing corpus","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang","year":"2022"},{"key":"ref61","first-page":"487","article-title":"PJS: Phoneme-balanced japanese singing-voice corpus","volume-title":"Proc. Asia-Pacific Signal Inf. Process. Assoc. Annu. Summit Conf.","author":"Koguchi","year":"2020"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-48"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475437"},{"key":"ref64","article-title":"Children\u2019s song dataset for singing voice research","volume-title":"Proc. 21st Int. Soc. Music Inf. Retrieval Conf.","volume":"4","author":"Choi","year":"2020"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref66","article-title":"The LJ speech dataset","author":"Ito","year":"2017"},{"key":"ref67","article-title":"Superseded-cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit","author":"Veaux","year":"2016"},{"key":"ref68","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3277276"},{"key":"ref70","article-title":"Amphion: An open-source audio music and speech generation toolkit","volume-title":"Proc. IEEE Spoken Lang. Technol. Workshop","author":"Zhang","year":"2024"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/asru57964.2023.10389671"},{"key":"ref72","article-title":"Weight normalization: A simple reparameterization to accelerate training of deep neural networks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Salimans","year":"2016"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1016\/j.wocn.2018.07.001"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461329"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941023"},{"key":"ref76","first-page":"1","article-title":"Chunked autoregressive GAN for conditional waveform synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Morrison","year":"2022"},{"key":"ref77","first-page":"1","article-title":"NaturalSpeech 3: Zero-shot speech synthesis with factorized codec and diffusion models","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Ju","year":"2024"},{"key":"ref78","first-page":"1","article-title":"Speechtokenizer: Unified speech tokenizer for speech language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Xin","year":"2024"},{"key":"ref79","article-title":"Emilia: An extensive, multilingual, and diverse speech dataset for large-scale speech generation","volume-title":"Proc. IEEE Spoken Lang. Technol. Workshop","author":"Haorui","year":"2024"},{"key":"ref80","article-title":"WenetSpeech4TTS: A 12,800-hour mandarin TTS corpus for large speech generation model benchmark","author":"Linhan","year":"2024"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6570655\/10304349\/10693565.pdf?arnumber=10693565","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:33:45Z","timestamp":1732667625000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10693565\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":80,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3468005","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}