{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T10:40:33Z","timestamp":1772793633780,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":18,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,21]],"date-time":"2024-05-21T00:00:00Z","timestamp":1716249600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,21]]},"DOI":"10.1145\/3649921.3650011","type":"proceedings-article","created":{"date-parts":[[2024,7,5]],"date-time":"2024-07-05T06:38:18Z","timestamp":1720161498000},"page":"1-6","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":3,"title":["The NES Video-Music Database: A Dataset of Symbolic Video Game Music Paired with Gameplay Videos"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-0323-3726","authenticated-orcid":false,"given":"Igor","family":"Cardoso","sequence":"first","affiliation":[{"name":"Universidade Federal de Vi\u00e7osa, Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5613-0400","authenticated-orcid":false,"given":"Rubens","family":"O. Moraes","sequence":"additional","affiliation":[{"name":"Universidade Federal de Vi\u00e7osa, Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5731-8114","authenticated-orcid":false,"given":"Lucas","family":"N. Ferreira","sequence":"additional","affiliation":[{"name":"Universidade Federal de Vi\u00e7osa, Brazil"}]}],"member":"320","published-online":{"date-parts":[[2024,7,5]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Proceedings of The Experimental AI in Games Workshop(EXAG\u201923)","author":"Cardinale Sara","year":"2023","unstructured":"Sara Cardinale and Oliver Withington. 2023. HarmonyMapper: Generating Emotionally Diverse Chord Progressions for Games. In Proceedings of The Experimental AI in Games Workshop(EXAG\u201923)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475195"},{"key":"e_1_3_2_1_3_1","volume-title":"The NES music database: A multi-instrumental dataset with expressive performance attributes. arXiv preprint arXiv:1806.04278","author":"Donahue Chris","year":"2018","unstructured":"Chris Donahue, Huanru\u00a0Henry Mao, and Julian McAuley. 2018. The NES music database: A multi-instrumental dataset with expressive performance attributes. arXiv preprint arXiv:1806.04278 (2018)."},{"key":"e_1_3_2_1_4_1","volume-title":"MusPy: A toolkit for symbolic music generation. arXiv preprint arXiv:2008.01951","author":"Dong Hao-Wen","year":"2020","unstructured":"Hao-Wen Dong, Ke Chen, Julian McAuley, and Taylor Berg-Kirkpatrick. 2020. MusPy: A toolkit for symbolic music generation. arXiv preprint arXiv:2008.01951 (2020)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11312"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v16i1.7408"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v18i1.21960"},{"key":"e_1_3_2_1_8_1","volume-title":"Proceedings of the Conference of the International Society for Music Information Retrieval(ISMIR\u201919)","author":"N.","unstructured":"Lucas\u00a0N. Ferreira and Jim Whitehead. 2019. Learning to Generate Music with Sentiment. In Proceedings of the Conference of the International Society for Music Information Retrieval(ISMIR\u201919)."},{"key":"e_1_3_2_1_9_1","volume-title":"Proceedings, Part XI 16","author":"Gan Chuang","year":"2020","unstructured":"Chuang Gan, Deng Huang, Peihao Chen, Joshua\u00a0B Tenenbaum, and Antonio Torralba. 2020. Foley music: Learning to generate music from videos. In Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XI 16. Springer, 758\u2013775."},{"key":"e_1_3_2_1_10_1","volume-title":"NIPS 2017 Workshop on Machine Learning for Creativity and Design.","author":"Oore Sageev","year":"2017","unstructured":"Sageev Oore, Ian Simon, Sander Dieleman, and Doug Eck. 2017. Learning to create piano performances. In NIPS 2017 Workshop on Machine Learning for Creativity and Design."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10710-017-9307-y"},{"key":"e_1_3_2_1_12_1","first-page":"3325","article-title":"Audeo: Audio generation for a silent performance video","volume":"33","author":"Su Kun","year":"2020","unstructured":"Kun Su, Xiulong Liu, and Eli Shlizerman. 2020. Audeo: Audio generation for a silent performance video. Advances in Neural Information Processing Systems 33 (2020), 3325\u20133337.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_13_1","volume-title":"Multi-instrumentalist net: Unsupervised generation of music from body movements. arXiv preprint arXiv:2012.03478","author":"Su Kun","year":"2020","unstructured":"Kun Su, Xiulong Liu, and Eli Shlizerman. 2020. Multi-instrumentalist net: Unsupervised generation of music from body movements. arXiv preprint arXiv:2012.03478 (2020)."},{"key":"e_1_3_2_1_14_1","volume-title":"Ismir, Vol.\u00a02003.","author":"Avery Wang","unstructured":"Avery Wang 2003. An industrial strength audio search algorithm.. In Ismir, Vol.\u00a02003. Washington, DC, 7\u201313."},{"key":"e_1_3_2_1_15_1","volume-title":"Learning interpretable representation for controllable polyphonic music generation. arXiv preprint arXiv:2008.07122","author":"Wang Ziyu","year":"2020","unstructured":"Ziyu Wang, Dingsu Wang, Yixiao Zhang, and Gus Xia. 2020. Learning interpretable representation for controllable polyphonic music generation. arXiv preprint arXiv:2008.07122 (2020)."},{"key":"e_1_3_2_1_16_1","volume-title":"Audio engineering society conference: 56th international conference: Audio for games","author":"Williams Duncan","unstructured":"Duncan Williams, Alexis Kirke, Joel Eaton, Eduardo Miranda, Ian Daly, James Hallowell, Etienne Roesch, Faustina Hwang, and Slawomir\u00a0J Nasuto. 2015. Dynamic game soundtrack generation in response to a continuously varying emotional trajectory. In Audio engineering society conference: 56th international conference: Audio for games. Audio Engineering Society."},{"key":"e_1_3_2_1_17_1","volume-title":"Musicbert: Symbolic music understanding with large-scale pre-training. arXiv preprint arXiv:2106.05630","author":"Zeng Mingliang","year":"2021","unstructured":"Mingliang Zeng, Xu Tan, Rui Wang, Zeqian Ju, Tao Qin, and Tie-Yan Liu. 2021. Musicbert: Symbolic music understanding with large-scale pre-training. arXiv preprint arXiv:2106.05630 (2021)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01433"}],"event":{"name":"FDG 2024: Foundations of Digital Games","location":"Worcester MA USA","acronym":"FDG 2024"},"container-title":["Proceedings of the 19th International Conference on the Foundations of Digital Games"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3649921.3650011","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3649921.3650011","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T12:52:35Z","timestamp":1755867155000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3649921.3650011"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,21]]},"references-count":18,"alternative-id":["10.1145\/3649921.3650011","10.1145\/3649921"],"URL":"https:\/\/doi.org\/10.1145\/3649921.3650011","relation":{},"subject":[],"published":{"date-parts":[[2024,5,21]]},"assertion":[{"value":"2024-07-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}