{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:11:10Z","timestamp":1761894670520,"version":"build-2065373602"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11210031","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Diverse Audio Caption Generation with Semantic-aware Diffusion Model"],"prefix":"10.1109","author":[{"given":"Hualei","family":"Wang","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology,Beijing,China"}]},{"given":"Yiming","family":"Li","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology,Beijing,China"}]},{"given":"Hong","family":"Liu","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology,Beijing,China"}]},{"given":"Xiangdong","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"18090","article-title":"Pengi: An audio language model for audio tasks","volume":"36","author":"Deshmukh","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"SALMONN: Towards generic hearing abilities for large language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Tang","key":"ref2"},{"article-title":"Revisiting deep audio-text retrieval through the lens of transportation","volume-title":"The Twelfth International Conference on Learning Representations","author":"Luong","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096877"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3419446"},{"article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","year":"2023","author":"Chu","key":"ref6"},{"article-title":"Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities","year":"2024","author":"Kong","key":"ref7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3416686"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3409212"},{"key":"ref10","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume":"33","author":"Ho","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref11","first-page":"8162","article-title":"Improved denoising diffusion probabilistic models","volume-title":"International conference on machine learning","author":"Nichol"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"article-title":"Audioldm: Text-to-audio generation with latent diffusion models","year":"2023","author":"Liu","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681688"},{"key":"ref15","first-page":"4328","article-title":"Diffusion-lm improves controllable text generation","volume":"35","author":"Li","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.660"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2025-79"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102643"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.248"},{"article-title":"Diffcap: Exploring continuous diffusion on image captioning","year":"2023","author":"He","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.373"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/750"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02237"},{"article-title":"Prefix-diffusion: A lightweight diffusion model for diverse image captioning","year":"2023","author":"Liu","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"article-title":"Beats: Audio pre-training with acoustic tokenizers","year":"2022","author":"Chen","key":"ref26"},{"article-title":"Analog bits: Generating discrete data using diffusion models with self-conditioning","year":"2022","author":"Chen","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref29","article-title":"Understanding and improving layer normalization","volume":"32","author":"Xu","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Ella: Equip diffusion models with llm for enhanced semantic alignment","year":"2024","author":"Hu","key":"ref30"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73242-3_3"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref33","first-page":"119","article-title":"Audiocaps: Generating captions for audios in the wild","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Kim"},{"article-title":"Qwen2-audio technical report","year":"2024","author":"Chu","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49660.2025.10889071"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746834"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11210031.pdf?arnumber=11210031","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:33:18Z","timestamp":1761888798000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11210031\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11210031","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}