{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T18:16:21Z","timestamp":1755800181930,"version":"3.44.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,8,26]],"date-time":"2025-08-26T00:00:00Z","timestamp":1756166400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,26]],"date-time":"2025-08-26T00:00:00Z","timestamp":1756166400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,8,26]]},"DOI":"10.1109\/cog64752.2025.11114105","type":"proceedings-article","created":{"date-parts":[[2025,8,19]],"date-time":"2025-08-19T18:06:42Z","timestamp":1755626802000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["IPCGRL: Language-Instructed Reinforcement Learning for Procedural Level Generation"],"prefix":"10.1109","author":[{"given":"In-Chang","family":"Baek","sequence":"first","affiliation":[{"name":"Gwangju Institute of Science and Technology (GIST),South Korea"}]},{"given":"Sung-Hyun","family":"Kim","sequence":"additional","affiliation":[{"name":"Gwangju Institute of Science and Technology (GIST),South Korea"}]},{"given":"Seo-Young","family":"Lee","sequence":"additional","affiliation":[{"name":"Gwangju Institute of Science and Technology (GIST),South Korea"}]},{"given":"Dong-Hyeon","family":"Kim","sequence":"additional","affiliation":[{"name":"Dongseo University,South Korea"}]},{"given":"Kyung-Joong","family":"Kim","sequence":"additional","affiliation":[{"name":"Gwangju Institute of Science and Technology (GIST),South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v16i1.7416"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619159"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v17i1.18904"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3555858.3563273"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CoG60054.2024.10645598"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2023.3335399"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CoG60054.2024.10645619"},{"key":"ref8","article-title":"Pcgrllm: Large language model-driven reward design for procedural content generation reinforcement learning","author":"Baek","year":"2025","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160626"},{"key":"ref10","first-page":"13584","article-title":"Language instructed reinforcement learning for human-ai coordination","volume-title":"International Conference on Machine Learning","author":"Hu","year":"2023"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2011.2148116"},{"key":"ref12","article-title":"Grounding language to autonomously-acquired skills via goal generation","author":"Akakzia","year":"2020","journal-title":"arXiv preprint"},{"key":"ref13","first-page":"9248","article-title":"Natural language instruction-following with task-related language development and translation","volume":"36","author":"Pang","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2021.xvii.047"},{"key":"ref15","first-page":"9767","article-title":"Multi-task reinforcement learning with context-based representations","volume-title":"International Conference on Machine Learning","author":"Sodhani","year":"2021"},{"key":"ref16","first-page":"3894","article-title":"Goal representations for instruction following: A semi-supervised language interface to control","volume-title":"Conference on Robot Learning","author":"Myers","year":"2023"},{"key":"ref17","article-title":"Learning to understand goal specifications by modelling reward","author":"Bahdanau","year":"2018","journal-title":"arXiv preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/331"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.1032"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1423"},{"key":"ref21","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint"},{"key":"ref22","first-page":"16455","article-title":"Discovered policy optimisation","volume":"35","author":"Lu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"}],"event":{"name":"2025 IEEE Conference on Games (CoG)","start":{"date-parts":[[2025,8,26]]},"location":"Lisbon, Portugal","end":{"date-parts":[[2025,8,29]]}},"container-title":["2025 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11113832\/11113841\/11114105.pdf?arnumber=11114105","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,20]],"date-time":"2025-08-20T06:47:06Z","timestamp":1755672426000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11114105\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,26]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/cog64752.2025.11114105","relation":{},"subject":[],"published":{"date-parts":[[2025,8,26]]}}}