{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:19:17Z","timestamp":1775578757768,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10446663","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"1331-1335","source":"Crossref","is-referenced-by-count":36,"title":["Adapting Frechet Audio Distance for Generative Music Evaluation"],"prefix":"10.1109","author":[{"given":"Azalea","family":"Gui","sequence":"first","affiliation":[{"name":"University of Toronto"}]},{"given":"Hannes","family":"Gamper","sequence":"additional","affiliation":[{"name":"Microsoft Research Redmond"}]},{"given":"Sebastian","family":"Braun","sequence":"additional","affiliation":[{"name":"Microsoft Research Redmond"}]},{"given":"Dimitra","family":"Emmanouilidou","sequence":"additional","affiliation":[{"name":"Microsoft Research Redmond"}]}],"member":"263","reference":[{"key":"ref1","author":"Copet","year":"2023","journal-title":"Simple and Controllable Music Generation"},{"key":"ref2","author":"Agostinelli","year":"2023","journal-title":"MusicLM: Generating Music From Text"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3268730"},{"key":"ref4","author":"Huang","year":"2023","journal-title":"Noise2Music: Text-conditioned Music Generation with Diffusion Models"},{"key":"ref5","author":"Liu","year":"2023","journal-title":"AudioLDM: Text-to-Audio Generation with Latent Diffusion Models"},{"key":"ref6","author":"Mariani","year":"2023","journal-title":"Multi-Source Diffusion Models for Simultaneous Music Generation and Separation"},{"key":"ref7","author":"Goel","year":"2022","journal-title":"It\u2019s Raw! Audio Generation with State-Space Models"},{"key":"ref8","author":"Elizalde","year":"2022","journal-title":"CLAP: Learning Audio Concepts From Natural Language Supervision"},{"key":"ref9","author":"Huang","year":"2022","journal-title":"MuLan: A Joint Embedding of Music Audio and Natural Language"},{"key":"ref10","author":"Kilgour","year":"2019","journal-title":"Fr\u00e9chet Audio Distance: A Metric for Evaluating Music Enhancement Algorithms"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"ref12","author":"Vinay","year":"2022","journal-title":"Evaluating generative audio systems and their metrics"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00611"},{"key":"ref14","author":"Defferrard","year":"2017","journal-title":"FMA: A Dataset For Music Analysis"},{"key":"ref15","article-title":"The MUSDB18 corpus for music separation","author":"Rafii","year":"2017"},{"key":"ref16","article-title":"Moisesdb: A dataset for source separation beyond 4-stems","author":"Pereira","year":"2023"},{"key":"ref17","article-title":"The Role of ImageNet Classes in Fr\u00e9chet Inception Distance","author":"Kynk\u00e4\u00e4nniemi","year":"2023"},{"key":"ref18","author":"Betzalel","year":"2022","journal-title":"A Study on the Evaluation of Generative Models"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682475"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1242"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448504"},{"key":"ref22","article-title":"Large-scale contrastive language-audio pre-training with feature fusion and keyword-to-caption augmentation","volume-title":"IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP","author":"Wu"},{"key":"ref23","article-title":"MERT: Acoustic Music Understanding Model with Large-Scale Self-supervised Training","author":"Li","year":"2023"},{"key":"ref24","article-title":"CD-PAM: Contrastive learning for perceptual audio similarity","author":"Manocha","year":"2021"},{"key":"ref25","article-title":"High Fidelity Neural Audio Compression","author":"D\u00e9fossez","year":"2022"},{"key":"ref26","article-title":"High-Fidelity Audio Compression with Improved RVQ-GAN","author":"Kumar","year":"2023"},{"key":"ref27","article-title":"Mubert","year":"2023"},{"key":"ref28","article-title":"Pedalboard","author":"Sobot","year":"2021"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10446663.pdf?arnumber=10446663","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:45:01Z","timestamp":1722573901000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10446663\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10446663","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}