{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,13]],"date-time":"2026-05-13T17:23:25Z","timestamp":1778693005605,"version":"3.51.4"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10448436","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"10616-10620","source":"Crossref","is-referenced-by-count":13,"title":["Multi-Scale Sub-Band Constant-Q Transform Discriminator for High-Fidelity Vocoder"],"prefix":"10.1109","author":[{"given":"Yicheng","family":"Gu","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong,School of Data Science,Shenzhen (CUHK-Shenzhen),China"}]},{"given":"Xueyao","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,School of Data Science,Shenzhen (CUHK-Shenzhen),China"}]},{"given":"Liumeng","family":"Xue","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,School of Data Science,Shenzhen (CUHK-Shenzhen),China"}]},{"given":"Zhizheng","family":"Wu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,School of Data Science,Shenzhen (CUHK-Shenzhen),China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1609.03499"},{"key":"ref2","first-page":"2415","article-title":"Efficient neural audio synthesis","volume-title":"ICML","volume":"80","author":"Kalchbrenner"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"ref4","first-page":"7706","article-title":"Waveflow: A compact flow-based model for raw audio","volume-title":"ICML","volume":"119","author":"Ping"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053795"},{"key":"ref6","first-page":"14881","article-title":"Melgan: Generative adversarial networks for conditional waveform synthesis","author":"Kumar","year":"2019","journal-title":"NeurIPS"},{"key":"ref7","article-title":"Universal melgan: A robust neural vocoder for high-fidelity waveform generation in multiple domains","author":"Jang","year":"2020"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2143"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-845"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547854"},{"key":"ref11","article-title":"Bigvgan: A universal neural vocoder with large-scale training","volume-title":"ICLR","author":"Lee"},{"key":"ref12","article-title":"Bigvsan: Enhancing gan-based neural vocoders with slicing adversarial network","author":"Shibuya","year":"2023","journal-title":"CoRR"},{"key":"ref13","article-title":"Wavegrad: Estimating gradients for waveform generation","volume-title":"ICLR","author":"Chen"},{"key":"ref14","article-title":"Diffwave: A versatile diffusion model for audio synthesis","volume-title":"ICLR","author":"Kong"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682298"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-41"},{"key":"ref17","article-title":"High fidelity neural audio compression","author":"D\u00e9fossez","year":"2022"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475437"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1121\/1.404385"},{"key":"ref20","article-title":"MERT: acoustic music understanding model with large-scale self-supervised training","author":"Li","year":"2023"},{"key":"ref21","first-page":"3","article-title":"Constant-q transform toolbox for music processing","volume-title":"Sound and Music Computing Conference","author":"Sch\u00f6rkhuber"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.25080\/Majora-7b98e3ed-003"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3019084"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21350"},{"key":"ref25","article-title":"Amphion: An open-source audio, music and speech generation toolkit","author":"Zhang","year":"2023"},{"key":"ref26","article-title":"M4singer: A multi-style, multi-singer and musical score provided mandarin singing corpus","author":"Zhang","year":"2022","journal-title":"NeurIPS"},{"key":"ref27","first-page":"487","article-title":"PJS: phoneme-balanced japanese singing-voice corpus","volume-title":"APSIPA","author":"Koguchi"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-48"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475437"},{"key":"ref30","article-title":"Children\u2019s song dataset for singing voice research","volume-title":"ISMIR","author":"Choi"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref32","article-title":"The lj speech dataset","author":"Ito","year":"2017"},{"key":"ref33","volume-title":"Cstr vctk corpus: English multi-speaker corpus for cstr voice cloning toolkit (version 0.92)","author":"Yamagishi","year":"2019"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/PACRIM.1993.407206"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389671"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10448436.pdf?arnumber=10448436","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,3]],"date-time":"2024-08-03T04:37:53Z","timestamp":1722659873000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10448436\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10448436","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}