{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:42:56Z","timestamp":1776886976396,"version":"3.51.2"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434644","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["DiffRhythm+: Controllable and Flexible Full-Length Song Generation with Preference Optimization"],"prefix":"10.1109","author":[{"given":"Huakang","family":"Chen","sequence":"first","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Yuepeng","family":"Jiang","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Guobin","family":"Ma","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Chunbo","family":"Hao","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Shuai","family":"Wang","sequence":"additional","affiliation":[{"name":"Nanjing University,School of Intelligence Science and Technology,Suzhou,China"}]},{"given":"Jixun","family":"Yao","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Ziqian","family":"Ning","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]},{"given":"Meng","family":"Meng","sequence":"additional","affiliation":[{"name":"MilM Plus Xiaomi Inc"}]},{"given":"Jian","family":"Luan","sequence":"additional","affiliation":[{"name":"MilM Plus Xiaomi Inc"}]},{"given":"Lei","family":"Xie","sequence":"additional","affiliation":[{"name":"Audio, Speech and Language Processing Group (ASLP@NPU)"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/science.aax0868"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1093\/oso\/9780198848400.001.0001"},{"key":"ref3","article-title":"The Music Producer\u2019s Handbook, Hal Leonard Books, an imprint of Hal Leonard Corporation","author":"Owsinski","year":"2016","journal-title":"Milwaukee, WI, second edition"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376739"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-024-09418-2"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21350"},{"key":"ref7","article-title":"Hifisinger: Towards high-fidelity neural singing voice synthesis","volume-title":"arXiv preprint arXiv:2009.01776","author":"Chen","year":"2020"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747664"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106762"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-2360"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0766"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447265"},{"key":"ref13","article-title":"Musiclm: Generating music from text","author":"Agostinelli","year":"2023","journal-title":"arXiv preprint arXiv:2301.11325"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.339"},{"key":"ref15","article-title":"Singsong: Generating musical accompaniments from singing","author":"Donahue","year":"2023","journal-title":"arXiv preprint arXiv:2301.12662"},{"key":"ref16","article-title":"Riffusion-stable diffusion for realtime music generation","author":"Forsgren","year":"2022"},{"key":"ref17","article-title":"Jukebox: A generative model for music","author":"Dhariwal","year":"2020","journal-title":"arXiv preprint arXiv:2005.00341"},{"key":"ref18","article-title":"Accompanied singing voice synthesis with fully text-controlled melody","volume-title":"arXiv preprint arXiv:2407.02049","author":"Li","year":"2024"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.52202\/079017-2546"},{"key":"ref20","article-title":"Songgen: A single stage auto-regressive transformer for text-to-song generation","author":"Liu","year":"2025","journal-title":"arXiv preprint arXiv:2502.13128"},{"key":"ref21","article-title":"Yue: Scaling open foundation models for long-form music generation","author":"Yuan","year":"2025","journal-title":"arXiv preprint arXiv:2503.08638"},{"key":"ref22","article-title":"Noise2music: Text-conditioned music generation with diffusion models","author":"Huang","year":"2023","journal-title":"arXiv preprint arXiv:2302.03917"},{"key":"ref23","article-title":"Fast timing-conditioned latent audio diffusion","volume-title":"Forty-first International Conference on Machine Learning","author":"Evans"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.459"},{"key":"ref25","article-title":"Diffrhythm: Blazingly fast and embarrassingly simple end-to-end full-length song generation with latent diffusion","author":"Ning","year":"2025","journal-title":"arXiv preprint arXiv:2503.01183"},{"key":"ref26","article-title":"Mulan: A joint embedding of music audio and natural language","author":"Huang","year":"2022","journal-title":"arXiv preprint arXiv:2208.12415"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASLPRO.2025.3602320"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399026"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2066"},{"key":"ref30","article-title":"Joint audio and symbolic conditioning for temporally controlled text-to-music generation","author":"Tal","year":"2024","journal-title":"arXiv preprint arXiv:2406.10970"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-250"},{"key":"ref32","article-title":"Realtime and accurate: Zero-shot high-fidelity singing voice conversion with multi-condition flow synthesis","volume-title":"arXiv preprint arXiv:2405.15093","author":"Li","year":"2024"},{"key":"ref33","article-title":"Flow matching for generative modeling","author":"Lipman","year":"2022","journal-title":"arXiv preprint arXiv:2210.02747"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681688"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2338"},{"key":"ref36","article-title":"Songeval: A benchmark dataset for song aesthetics evaluation","author":"Yao","year":"2025","journal-title":"arXiv preprint arXiv:2505.10793"},{"key":"ref37","article-title":"Audiobox: Unified audio generation with natural language prompts","author":"Vyas","year":"2023","journal-title":"arXiv preprint arXiv:2312.15821"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i24.34750"},{"key":"ref39","article-title":"Fr\u00e9chet audio distance: A metric for evaluating music enhancement algorithms","author":"Kilgour","year":"2018","journal-title":"arXiv preprint arXiv:1812.08466"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.133"},{"key":"ref41","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv preprint arXiv:2302.13971"},{"key":"ref42","article-title":"Classifier-free diffusion guidance","author":"Ho","year":"2022","journal-title":"arXiv preprint arXiv:2207.12598"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434644.pdf?arnumber=11434644","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:57:39Z","timestamp":1775192259000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434644\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434644","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}