{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T16:05:21Z","timestamp":1774281921817,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10448115","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"371-375","source":"Crossref","is-referenced-by-count":16,"title":["Training Audio Captioning Models without Audio"],"prefix":"10.1109","author":[{"given":"Soham","family":"Deshmukh","sequence":"first","affiliation":[{"name":"Microsoft"}]},{"given":"Benjamin","family":"Elizalde","sequence":"additional","affiliation":[{"name":"Microsoft"}]},{"given":"Dimitra","family":"Emmanouilidou","sequence":"additional","affiliation":[{"name":"Microsoft"}]},{"given":"Bhiksha","family":"Raj","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Rita","family":"Singh","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Huaming","family":"Wang","sequence":"additional","affiliation":[{"name":"Microsoft"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Sequence to sequence learning with neural networks","volume":"27","author":"Sutskever","year":"2014","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21315"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746312"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"ref6","article-title":"Audio captioning using pre-trained large-scale language model guided by audio-based similar caption retrieval","author":"Koizumi","year":"2020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096877"},{"key":"ref8","article-title":"Pengi: An audio language model for audio tasks","author":"Deshmukh","year":"2023"},{"key":"ref9","first-page":"90","article-title":"Diversity and bias in audio captioning datasets","volume-title":"Proceedings of the 6th Detection and Classification of Acoustic Scenes and Events 2021 Workshop (DCASE2021)","author":"Martin"},{"key":"ref10","article-title":"Investigations in audio captioning: Addressing vocabulary imbalance and evaluating suitability of language-centric performance metrics","author":"Kothinti","year":"2023"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO54536.2021.9616087"},{"key":"ref12","article-title":"Audio captioning using sound event detection","author":"Eren","year":"2021","journal-title":"DCASE2021 Challenge, Tech. Rep."},{"key":"ref13","article-title":"Improving the performance of automated audio captioning via integrating the acoustic and semantic information","volume-title":"Workshop on Detection and Classification of Acoustic Scenes and Events","author":"Ye"},{"key":"ref14","article-title":"Synergy between human and machine approaches to sound\/scene recognition and processing: An overview of icassp special session","author":"Heller","year":"2023"},{"key":"ref15","article-title":"CL4AC: A contrastive loss for audio captioning","volume-title":"Proceedings of the Detection and Classification of Acoustic Scenes and Events 2021 Workshop (DCASE 2021)","author":"Liu"},{"key":"ref16","article-title":"Never-ending learning of sounds","volume-title":"Ph.D. dissertation","author":"Elizalde","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746894"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/taslp.2024.3419446"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095889"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448504"},{"key":"ref21","article-title":"Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning","author":"Liang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1136"},{"key":"ref24","article-title":"Make-An-Audio: Text-to-audio generation with prompt-enhanced diffusion models","author":"Huang","year":"2023"},{"key":"ref25","article-title":"AudioLDM: Text-to-audio generation with latent diffusion models","author":"Liu","year":"2023"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.299"},{"key":"ref27","article-title":"I can\u2019t believe there\u2019s no images! learning visual tasks using only language data","author":"Gu","year":"2023"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref29","article-title":"Audio-Caps: Generating Captions for Audios in The Wild","author":"Kim","year":"2019","journal-title":"NAACL-HLT"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10448115.pdf?arnumber=10448115","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:52:43Z","timestamp":1722574363000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10448115\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10448115","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}