{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T02:13:20Z","timestamp":1773713600925,"version":"3.50.1"},"reference-count":75,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"China NSFC","award":["62201503"],"award-info":[{"award-number":["62201503"]}]},{"name":"China NSFC","award":["62222114"],"award-info":[{"award-number":["62222114"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Dependable and Secure Comput."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/tdsc.2025.3624972","type":"journal-article","created":{"date-parts":[[2025,10,23]],"date-time":"2025-10-23T18:02:03Z","timestamp":1761242523000},"page":"2165-2182","source":"Crossref","is-referenced-by-count":0,"title":["Critical Information Only: A Content Privacy-Preserving Framework for Detecting Audio Deepfakes"],"prefix":"10.1109","volume":"23","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9686-4369","authenticated-orcid":false,"given":"Xinfeng","family":"Li","sequence":"first","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-3560-0142","authenticated-orcid":false,"given":"Yifan","family":"Zheng","sequence":"additional","affiliation":[{"name":"College of Electrical Engineering and the Ubiquitous System Security Lab (USSLab), Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4430-5263","authenticated-orcid":false,"given":"Chen","family":"Yan","sequence":"additional","affiliation":[{"name":"College of Electrical Engineering and the Ubiquitous System Security Lab (USSLab), Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4960-3019","authenticated-orcid":false,"given":"Kai","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4882-1823","authenticated-orcid":false,"given":"Chang","family":"Zeng","sequence":"additional","affiliation":[{"name":"National Institute of Informatics, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1101-0007","authenticated-orcid":false,"given":"Xiaoyu","family":"Ji","sequence":"additional","affiliation":[{"name":"College of Electrical Engineering and the Ubiquitous System Security Lab (USSLab), Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5043-9148","authenticated-orcid":false,"given":"Wenyuan","family":"Xu","sequence":"additional","affiliation":[{"name":"College of Electrical Engineering and the Ubiquitous System Security Lab (USSLab), Zhejiang University, Hangzhou, 
China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073640"},{"key":"ref2","article-title":"Deepfake audio is a political nightmare","author":"Meaker","year":"2023"},{"key":"ref3","article-title":"Fraudsters cloned company director\u2019s voice in ${\\$}$$35 million bank heist, police find","author":"Brewster","year":"2022"},{"key":"ref4","article-title":"Artificial imposters\u2014cybercriminals turn to AI voice cloning for a new breed of scam","year":"2023"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747766"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414234"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096278"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-702"},{"key":"ref9","article-title":"ASVSpoof 2021 baseline CM","year":"2021"},{"key":"ref10","article-title":"Synthetic speech detection using meta-learning with prototypical loss","author":"Pal","year":"2022"},{"key":"ref11","article-title":"Google admits partners leaked more than 1,000 private conversations with Google assistant","author":"Haselton","year":"2019"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00009"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2024.23030"},{"key":"ref14","first-page":"6306","article-title":"Neural discrete representation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Den","year":"2017"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/s41593-023-01468-4"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/35.417"},{"key":"ref18","article-title":"Definition of the opus audio codec","volume-title":"Internet Eng. Task Force","author":"Valin","year":"2012"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2020.101114"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/ASVSPOOF.2021-8"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref22","first-page":"4211","article-title":"Common voice: A massively-multilingual speech corpus","volume-title":"Proc. 12th Conf. Lang. Resour. Eval.","author":"Ardila","year":"2020"},{"key":"ref23","article-title":"Diffwave: A versatile diffusion model for audio synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kong","year":"2021"},{"key":"ref24","article-title":"Audio deepfake detection: A survey","author":"Yi","year":"2023"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3576915.3623209"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354248"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-847"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681345"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/odyssey.2022-16"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2025.243389"},{"key":"ref31","first-page":"19594","article-title":"Styletts 2: Towards human-level text-to-speech through style diffusion and adversarial training with large speech language models","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"36","author":"Li","year":"2023"},{"key":"ref32","article-title":"Diffusion-based voice conversion with fast maximum likelihood sampling scheme","volume-title":"Proc. Int.Conf. Learn. Representations","author":"Popov","year":"2022"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-983"},{"key":"ref34","article-title":"Unifyspeech: A unified framework for zero-shot text-to-speech and voice conversion","author":"Liu","year":"2023"},{"key":"ref35","first-page":"5210","article-title":"AutoVC: Zero-shot voice style transfer with only autoencoder loss","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Qian","year":"2019"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/11.362938"},{"key":"ref37","volume-title":"Augmentative and Alternative Communication","author":"Beukelman","year":"1998"},{"key":"ref38","article-title":"Vorbis audio compression","author":"Community","year":"2016"},{"issue":"1\/2","key":"ref39","first-page":"52","article-title":"Modified discrete cosine transform: Its implications for audio coding and error concealment","volume":"51","author":"Wang","year":"2003","journal-title":"J. Audio Eng. Soc."},{"key":"ref40","article-title":"High fidelity neural audio compression","volume":"2023","author":"D\u00e9fossez","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref43","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023"},{"key":"ref44","article-title":"Speechtokenizer: Unified speech tokenizer for speech large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","year":"2024"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2013.6707742"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.23919\/APSIPAASC55919.2022.9980281"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3552466.3556523"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuropsychologia.2023.108584"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3207050"},{"key":"ref54","article-title":"HiFi-Codec: Group-residual vector quantization for high fidelity audio codec","author":"Yang","year":"2023"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2010.5495200"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.5555\/3454287.3455008"},{"key":"ref57","first-page":"17022","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kong","year":"2020"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053795"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383551"},{"key":"ref60","first-page":"14881","article-title":"MelGAN: Generative adversarial networks for conditional waveform synthesis","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"32","author":"Kumar","year":"2019"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413605"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1984.1164317"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1587\/transinf.2015EDP7457"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/TBIOM.2021.3059479"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953152"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00552"},{"key":"ref67","article-title":"SpeechBrain: A general-purpose speech toolkit","author":"Ravanelli","year":"2021"},{"key":"ref68","article-title":"Tencent speech-to-text","author":"Cloud","year":"2024"},{"key":"ref69","article-title":"Xunfei speech-to-text","year":"2024"},{"key":"ref70","article-title":"Azure speech-to-text","author":"Azure","year":"2024"},{"key":"ref71","article-title":"Amazon speech-to-text","author":"Transcribe","year":"2024"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2004-668"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2114881"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1145\/1124772.1124848"},{"key":"ref75","article-title":"Wav2Vec2 v2.0","author":"Fairseq","year":"2024"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1145\/3398209"}],"container-title":["IEEE Transactions on Dependable and Secure Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/8858\/11434575\/11216043.pdf?arnumber=11216043","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T01:16:26Z","timestamp":1773710186000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11216043\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":75,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tdsc.2025.3624972","relation":{},"ISSN":["1545-5971","1941-0018","2160-9209"],"issn-type":[{"value":"1545-5971","type":"print"},{"value":"1941-0018","type":"electronic"},{"value":"2160-9209","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}