{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:25:01Z","timestamp":1775579101334,"version":"3.50.1"},"reference-count":43,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10889782","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:15:19Z","timestamp":1741799719000},"page":"1-5","source":"Crossref","is-referenced-by-count":3,"title":["Latent Watermarking of Audio Generative Models"],"prefix":"10.1109","author":[{"given":"Robin San","family":"Roman","sequence":"first","affiliation":[{"name":"Meta, FAIR, Univ. de Lorraine, CNRS, Inria, Loria"}]},{"given":"Pierre","family":"Fernandez","sequence":"additional","affiliation":[{"name":"Meta, FAIR, Inria Rennes"}]},{"given":"Antoine","family":"Deleforge","sequence":"additional","affiliation":[{"name":"IRMA, CNRS, Univ. de Strasbourg, Inria"}]},{"given":"Yossi","family":"Adi","sequence":"additional","affiliation":[{"name":"Meta, FAIR, Hebrew Univ. of Jerusalem"}]},{"given":"Romain","family":"Serizel","sequence":"additional","affiliation":[{"name":"Univ. 
de Lorraine, CNRS, Inria, Loria"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Audiogen: Textually guided audio generation","author":"Kreuk","year":"2022"},{"key":"ref2","article-title":"AudioLDM: Text-to-audio generation with latent diffusion models","volume-title":"Proceedings of the International Conference on Machine Learning","author":"Liu"},{"key":"ref3","article-title":"Musiclm: Generating music from text","author":"Agostinelli","year":"2023"},{"key":"ref4","article-title":"Simple and controllable music generation","volume-title":"NeurIPS","volume":"36","author":"Copet"},{"key":"ref5","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023"},{"key":"ref6","article-title":"Audiobox: Unified audio generation with natural language prompts","author":"Vyas","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01042"},{"key":"ref9","article-title":"Gpt-4 technical report","year":"2023"},{"key":"ref10","article-title":"Proactive detection of voice cloning with localized watermarking","author":"Roman","year":"2024"},{"key":"ref11","article-title":"Wavmark: Watermarking for audio generation","author":"Chen","year":"2024"},{"key":"ref12","article-title":"Seamless: Multilingual expressive and streaming speech translation","author":"Barrault","year":"2023"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02053"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00857"},{"key":"ref15","article-title":"Melgan: Generative adversarial networks for conditional waveform synthesis","author":"Kumar","year":"2019"},{"key":"ref16","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","author":"Kong","year":"2020"},{"key":"ref17","article-title":"From discrete tokens to 
high-fidelity audio using multi-band diffusion","volume-title":"NeurIPS","volume":"36","author":"Roman"},{"key":"ref18","article-title":"High fidelity neural audio compression","author":"D\u00e9fossez","year":"2022"},{"key":"ref19","article-title":"Wavenet: A generative model for raw audio","author":"den Oord","year":"2016"},{"key":"ref20","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref21","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"NeurIPS","volume":"33","author":"Brown"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.593"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/taslp.2021.3129994"},{"key":"ref24","article-title":"High-fidelity audio compression with improved rvqgan","author":"Kumar","year":"2023"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref26","article-title":"Voicebox: Text-guided multilingual universal speech generation at scale","author":"Le","year":"2023"},{"key":"ref27","article-title":"Naturalspeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers","author":"Shen","year":"2023"},{"key":"ref28","article-title":"Joint audio and symbolic conditioning for temporally controlled text-to-music generation","author":"Tal","year":"2024"},{"key":"ref29","article-title":"Fast timing-conditioned latent audio diffusion","author":"Evans","year":"2024"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2005.861292"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2009.2019259"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26550"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.dsp.2021.103381"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447253"},{"key":"ref35","article-title":"Responsible disclosure of 
generative models using scalable fingerprinting","volume-title":"International Conference on Learning Representations","author":"Yu"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10448134"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01418"},{"key":"ref38","article-title":"A recipe for watermarking diffusion models","author":"Zhao","year":"2023"},{"key":"ref39","article-title":"On the learnability of watermarks for language models","author":"Gu","year":"2023"},{"key":"ref40","article-title":"Watermarking makes language models radioactive","author":"Sander","year":"2024"},{"key":"ref41","article-title":"Llama 2: Open foundation and finetuned chat models","author":"Touvron","year":"2023"},{"key":"ref42","article-title":"Diffwave: A versatile diffusion model for audio synthesis","author":"Kong","year":"2021"},{"key":"ref43","article-title":"Fr\u00e9chet audio distance: A metric for evaluating music enhancement algorithms","author":"Kilgour","year":"2018"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing 
(ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10889782.pdf?arnumber=10889782","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:24:14Z","timestamp":1774416254000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10889782\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10889782","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}