{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T05:53:27Z","timestamp":1750830807413,"version":"3.28.0"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,30]]},"DOI":"10.1109\/cec60901.2024.10612075","type":"proceedings-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T17:55:15Z","timestamp":1723139715000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["Emotion-Conditioned MusicLM: Enhancing Emotional Resonance in Music Generation"],"prefix":"10.1109","author":[{"given":"Yuelang","family":"Sun","sequence":"first","affiliation":[{"name":"School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology,Auckland,New Zealand"}]},{"given":"Matthew","family":"Kuo","sequence":"additional","affiliation":[{"name":"School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology,Auckland,New Zealand"}]},{"given":"Xiaodan","family":"Wang","sequence":"additional","affiliation":[{"name":"Yanbian University,Jilin,China"}]},{"given":"Weihua","family":"Li","sequence":"additional","affiliation":[{"name":"School of Engineering, Computer and Mathematical Sciences, Auckland University of Technology,Auckland,New Zealand"}]},{"given":"Quan","family":"Bai","sequence":"additional","affiliation":[{"name":"University of Tasmania,Hobart,Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3108242"},{"key":"ref2","article-title":"A comprehensive survey on deep music generation: Multi-level representations, algorithms, evaluations, and future directions","author":"Ji","year":"2020","journal-title":"arXiv preprint"},{"issue":"2","key":"ref3","article-title":"Hierarchical text-conditional image generation with clip latents","volume":"1","author":"Ramesh","year":"2022","journal-title":"arXiv preprint"},{"key":"ref4","article-title":"Sdxl: Improving latent diffusion models for high-resolution image synthesis","author":"Podell","year":"2023","journal-title":"arXiv preprint"},{"key":"ref5","article-title":"Musiclm: Generating music from text","author":"Agostinelli","year":"2023","journal-title":"arXiv preprint"},{"key":"ref6","article-title":"Noise2music: Text-conditioned music generation with diffusion models","author":"Huang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11312"},{"key":"ref8","article-title":"Symbolic music generation with diffusion models","author":"Mittal","year":"2021","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3276177"},{"key":"ref10","first-page":"4364","article-title":"A hierarchical latent vector model for learning long-term structure in music","volume-title":"International conference on machine learning","author":"Roberts","year":"2018"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/s41398-021-01483-8"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.3389\/frai.2020.497864"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s11227-018-2499-y"},{"key":"ref15","article-title":"Mulan: A joint embedding of music audio and natural language","author":"Huang","year":"2022","journal-title":"arXiv preprint"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053240"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095889"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2020.2987728"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854637"},{"key":"ref21","article-title":"Emotion embedding spaces for matching music to stories","volume-title":"International Society for Music Information Retrieval Conference","author":"Won","year":"2021"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-021-0569-4"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746312"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref26","article-title":"Mert: Acoustic music understanding model with large-scale self-supervised training","author":"Li","year":"2023","journal-title":"arXiv preprint"},{"key":"ref27","article-title":"High fidelity neural audio compression","author":"Defossez","year":"2022","journal-title":"arXiv preprint"}],"event":{"name":"2024 IEEE Congress on Evolutionary Computation (CEC)","start":{"date-parts":[[2024,6,30]]},"location":"Yokohama, Japan","end":{"date-parts":[[2024,7,5]]}},"container-title":["2024 IEEE Congress on Evolutionary Computation (CEC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10609966\/10611750\/10612075.pdf?arnumber=10612075","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,11]],"date-time":"2024-08-11T04:24:51Z","timestamp":1723350291000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10612075\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,30]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/cec60901.2024.10612075","relation":{},"subject":[],"published":{"date-parts":[[2024,6,30]]}}}