{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T19:39:12Z","timestamp":1730230752469,"version":"3.28.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10446991","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"10266-10270","source":"Crossref","is-referenced-by-count":0,"title":["A Study on the Adverse Impact of Synthetic Speech on Speech Recognition"],"prefix":"10.1109","author":[{"given":"Jian","family":"Huang","sequence":"first","affiliation":[{"name":"Alibaba Group,China"}]},{"given":"Yancheng","family":"Bai","sequence":"additional","affiliation":[{"name":"Alibaba Group,China"}]},{"given":"Yang","family":"Cai","sequence":"additional","affiliation":[{"name":"Alibaba Group,China"}]},{"given":"Wei","family":"Bian","sequence":"additional","affiliation":[{"name":"Alibaba Group,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"17022","article-title":"Hifigan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"Kong","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-277"},{"article-title":"Neural codec language models are zero-shot text to speech synthesizers","year":"2023","author":"Wang","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003775"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053104"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1290"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10225"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9687942"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.505"},{"key":"ref10","first-page":"70","article-title":"Deepfake audio detection via feature engineering and machine learning","volume-title":"Woodstock\u201922: Symposium on the irreproducible science","author":"Iqbal"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3552466.3556530"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP49672.2021.9362075"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094955"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-999"},{"key":"ref15","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref16","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref17","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"article-title":"Audiopalm: A large language model that can speak and listen","year":"2023","author":"Rubenstein","key":"ref18"},{"article-title":"Viola: Unified codec language models for speech recognition, synthesis, and translation","year":"2023","author":"Wang","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-9996"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1428"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10446991.pdf?arnumber=10446991","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T05:28:30Z","timestamp":1722576510000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10446991\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10446991","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}