{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,23]],"date-time":"2025-09-23T14:08:44Z","timestamp":1758636524789,"version":"3.28.0"},"reference-count":49,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,9,30]]},"DOI":"10.1109\/is262782.2024.10704094","type":"proceedings-article","created":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T17:42:20Z","timestamp":1728322940000},"page":"1-10","source":"Crossref","is-referenced-by-count":1,"title":["GENPIA: A Genre-Conditioned Piano Music Generation System"],"prefix":"10.1109","author":[{"given":"Quoc-Viet","family":"Nguyen","sequence":"first","affiliation":[{"name":"National Central University,Computer Science and Information Engineering,Taoyuan,Taiwan"}]},{"given":"Hao-Wei","family":"Lai","sequence":"additional","affiliation":[{"name":"National Central University,Computer Science and Information Engineering,Taoyuan,Taiwan"}]},{"given":"Khanh-Duy","family":"Nguyen","sequence":"additional","affiliation":[{"name":"National Central University,Computer Science and Information Engineering,Taoyuan,Taiwan"}]},{"given":"Min-Te","family":"Sun","sequence":"additional","affiliation":[{"name":"National Central University,Computer Science and Information Engineering,Taoyuan,Taiwan"}]},{"given":"Wu-Yuin","family":"Hwang","sequence":"additional","affiliation":[{"name":"National Central University,Graduate Institute of Network Learning Technology,Taoyuan,Taiwan"}]},{"given":"Kazuya","family":"Sakai","sequence":"additional","affiliation":[{"name":"Tokyo Metropolitan University,Electrical Engineering and Computer Science,Hino,Tokyo,Japan"}]},{"given":"Wei-Shinn","family":"Ku","sequence":"additional","affiliation":[{"name":"Auburn University,Computer Science and Software Engineering,Auburn,AL,USA"}]}],"member":"263","reference":[{"volume-title":"Salamander grand piano","key":"ref1"},{"journal-title":"Musiclm: Generating music from text","year":"2023","author":"Borsos","key":"ref2"},{"key":"ref3","first-page":"3159","article-title":"Character-level language modeling with deeper self-attention","volume-title":"Proceedings of the AAAI conference on artificial intelligence","volume":"33","author":"AI-Rfou","year":"2019"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/springerreference_61053"},{"key":"ref5","first-page":"155","article-title":"A multitrack dataset for annotation-intensive mir research","volume":"14","author":"Tierney","year":"2014","journal-title":"ISMIR"},{"journal-title":"composition of guitar tabs by transformers and groove modeling","year":"2020","author":"Hsiao","key":"ref6"},{"key":"ref7","volume":"abs\/2211.07131","author":"Lee","year":"2022","journal-title":"2413-mdb: A multi-instrumental fm video game music dataset with emotion annotations"},{"key":"ref8","first-page":"1899","article-title":"Encoding musical style with transformer autoencoders","author":"Hawthorne","year":"2020","journal-title":"ICML"},{"key":"ref9","article-title":"Transformer-xl: Attentive language models beyond 
a fixed-length context","author":"Yang","year":"2019","journal-title":"ar Xiv preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-79948-3_5050"},{"key":"ref11","article-title":"Lakhnes: Improving multi-instrumental music generation with cross-domain pre-training","author":"Li","year":"2019","journal-title":"ar Xiv preprint"},{"journal-title":"Mmm: Exploring conditional multi-track music generation with the transformer","year":"2020","author":"Ens","key":"ref12"},{"key":"ref13","first-page":"368","article-title":"Ranking-based emotion recognition for experimental music","volume":"2017","author":"Tatar","year":"2017","journal-title":"ISMIR"},{"key":"ref14","first-page":"196","article-title":"A dataset for soundscape emotion recognition","author":"Pasquier","year":"2017","journal-title":"2017 17th ACII"},{"volume-title":"github. Youtube-dl","year":"2021","key":"ref15"},{"volume-title":"github. Yt-dlp","year":"2023","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3422622"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref19","first-page":"178","article-title":"Compound word transformer: Learning to compose full-song music over dynamic directed hypergraphs","volume-title":"AAAI Conference on AI","volume":"35","author":"Liu","year":"2021"},{"journal-title":"transformer: Generating music with long-term structure (2018)","year":"2018","author":"Uszkoreit","key":"ref20"},{"key":"ref21","first-page":"1180","article-title":"Pop music trans-former: Beat-based modeling and generation of expres-sive pop piano compositions","volume-title":"Proceedings of the 28th ACM Multimedia","author":"Huang","year":"2020"},{"journal-title":"A multi-modal pop piano dataset for emotion recognition and emotion-based music generation","year":"2021","author":"Doh","key":"ref22"},{"journal-title":"Generating music by fine-tuning recurrent neural networks with reinforcement learning","year":"2016","author":"Gu","key":"ref23"},{"key":"ref24","first-page":"516","article-title":"Transformer vae: A hierarchical model for structure-aware and interpretable music representation learning","author":"Xia","year":"2020","journal-title":"ICASSP"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref26","first-page":"5156","article-title":"A. Trans-formers are rnns: Fast autoregressive transformers with linear attention","author":"Vyas","year":"2020","journal-title":"ICML"},{"key":"ref27","article-title":"A method for stochastic optimization","author":"Kingma","year":"2015","journal-title":"CoRR"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1561\/9781680836233"},{"key":"ref29","first-page":"87","article-title":"A large-scale midi dataset for classical piano music","volume":"5","author":"Chen","year":"2020","journal-title":"Trans. Int. Soc. Music. Inf. Retr."},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3121991"},{"volume-title":"Midjourney.com. Midjourney","key":"ref31"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-018-3758-9"},{"volume-title":"OpenAI. Chatgpt","year":"2021","key":"ref33"},{"key":"ref34","first-page":"570","article-title":"Multi-modal music emotion recognition: A new dataset, methodology and comparative analysis","author":"Malheiro","year":"2013","journal-title":"10th CMMR 2013"},{"journal-title":"Musenet. 
openai blog","year":"2019","author":"Payne","key":"ref35"},{"key":"ref36","first-page":"4364","article-title":"A hierarchical latent vector model for learning long-term structure in music","author":"Engel","year":"2018","journal-title":"ICML"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1038\/323533a0"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1037\/h0077714"},{"journal-title":"Text-to-music generation with long-context latent diffu-sion","year":"2023","author":"Jin","key":"ref39"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1002\/j.1538-7305.1948.tb01338.x"},{"volume-title":"Team Audacity. Audacity","year":"2000","key":"ref41"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3253602"},{"key":"ref43","article-title":"Attention is all you need","author":"Shazeer","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref44","article-title":"909: A pop-song dataset for music arrangement generation","author":"Jiang","year":"2020","journal-title":"ISMIR"},{"journal-title":"The jazz transformer on the front line: Exploring the shortcomings of ai-composed music through quantitative measures","year":"2020","author":"Yang","key":"ref45"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2953194"},{"key":"ref47","article-title":"What is a likert scale? and how do you pronouncelikert?","author":"Wuensch","year":"2005","journal-title":"East Carolina University"},{"issue":"7553","key":"ref48","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"Yan","year":"2015","journal-title":"nature"},{"journal-title":"A convolutional generative adversarial network for symbolic-domain music generation","year":"2017","author":"Yang","key":"ref49"}],"event":{"name":"2024 IEEE 5th International Symposium on the Internet of Sounds (IS2)","start":{"date-parts":[[2024,9,30]]},"location":"Erlangen, Germany","end":{"date-parts":[[2024,10,2]]}},"container-title":["2024 IEEE 5th International Symposium on the Internet of Sounds (IS2)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10704037\/10704076\/10704094.pdf?arnumber=10704094","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,8]],"date-time":"2024-10-08T04:59:20Z","timestamp":1728363560000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10704094\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/is262782.2024.10704094","relation":{},"subject":[],"published":{"date-parts":[[2024,9,30]]}}}
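
The record above is a standard Crossref REST API work envelope ("status"/"message-type"/"message"). As a minimal sketch of how such a record can be retrieved and unpacked, assuming Python with the requests package installed, the snippet below fetches the same DOI from the public https://api.crossref.org/works/{doi} endpoint and prints the title, author list, and deposited reference count; the variable names are illustrative, and the fields accessed ("message", "title", "author", "references-count") are exactly those present in the record above.

    import requests

    DOI = "10.1109/is262782.2024.10704094"  # DOI of the record above

    # Fetch the work record from the public Crossref REST API.
    # Crossref asks clients to identify themselves; a mailto in the
    # User-Agent is polite but optional.
    resp = requests.get(
        f"https://api.crossref.org/works/{DOI}",
        headers={"User-Agent": "example-client (mailto:you@example.org)"},
        timeout=30,
    )
    resp.raise_for_status()
    work = resp.json()["message"]  # the "message" envelope shown above

    # "title" is a list; each author entry carries "given"/"family" fields.
    print(work["title"][0])
    for author in work.get("author", []):
        print("  ", author.get("given", ""), author.get("family", ""))
    print("References deposited:", work.get("references-count", 0))

Run as-is, this prints "GENPIA: A Genre-Conditioned Piano Music Generation System", the seven authors, and a reference count of 49, matching the fields in the JSON record.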