{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T05:05:23Z","timestamp":1774674323655,"version":"3.50.1"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000185","name":"Defense Advanced Research Projects Agency","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000185","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100006602","name":"Air Force Research Laboratory","doi-asserted-by":"publisher","award":["W911NF2020003"],"award-info":[{"award-number":["W911NF2020003"]}],"id":[{"id":"10.13039\/100006602","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Emerg. Top. Comput. Intell."],"published-print":{"date-parts":[[2022,12]]},"DOI":"10.1109\/tetci.2022.3166555","type":"journal-article","created":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T20:24:32Z","timestamp":1652732672000},"page":"1335-1344","source":"Crossref","is-referenced-by-count":20,"title":["Decision Making in Monopoly Using a Hybrid Deep Reinforcement Learning Approach"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5050-0220","authenticated-orcid":false,"given":"Trevor","family":"Bonjour","sequence":"first","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9782-6591","authenticated-orcid":false,"given":"Marina","family":"Haliem","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1560-0116","authenticated-orcid":false,"given":"Aala","family":"Alsalem","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"given":"Shilpa","family":"Thomas","sequence":"additional","affiliation":[{"name":"University of Southern California, Los Angeles, CA, USA"}]},{"given":"Hongyu","family":"Li","sequence":"additional","affiliation":[{"name":"University of Southern California, Los Angeles, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9131-4723","authenticated-orcid":false,"given":"Vaneet","family":"Aggarwal","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5988-8305","authenticated-orcid":false,"given":"Mayank","family":"Kejriwal","sequence":"additional","affiliation":[{"name":"University of Southern California, Los Angeles, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3803-8672","authenticated-orcid":false,"given":"Bharat","family":"Bhargava","sequence":"additional","affiliation":[{"name":"Purdue University, West Lafayette, IN, 
USA"}]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/IntelliSys.2017.8324210"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1080\/0025570X.1972.11976187"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref30","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1023\/A:1022628806385"},{"key":"ref34","first-page":"2613","article-title":"Double Q-learning","author":"hasselt","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TENCON.2019.8929523"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3034218"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207153"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.10.020"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2020.103568"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8852110"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2021.102980"},{"key":"ref17","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"key":"ref4","first-page":"4474","article-title":"No press diplomacy: Modeling multi agent gameplay","author":"paquette","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref28","article-title":"Massively parallel methods for deep reinforcement learning","author":"nair","year":"2015"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2021.3049539"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1126\/science.aay2400"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"508","DOI":"10.1126\/science.aam6960","article-title":"DeepStack: Expert level artificial intelligence in heads up no limit poker","volume":"356","author":"morav?\u00edk","year":"2017","journal-title":"Science"},{"key":"ref29","article-title":"Prioritized experience replay","author":"schaul","year":"2015"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2018.2823329"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref9","article-title":"Learning to play monopoly: A reinforcement learning approach","author":"bailis","year":"0","journal-title":"Proc 50th Anniversary Conv Soc Study Artif Intell Simul Behav"},{"key":"ref1","first-page":"13 076","article-title":"Learning to correlate in multi-player general-sum sequential games","author":"celli","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref20","first-page":"877","article-title":"Evolutionary function approximation for reinforcement learning","volume":"7","author":"whiteson","year":"2006","journal-title":"J Mach Learn Res"},{"key":"ref22","article-title":"Continuous control with deep 
reinforcement learning","author":"lillicrap","year":"2015"},{"key":"ref21","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","author":"fujimoto","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref24","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref23","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref26","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref25","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"}],"container-title":["IEEE Transactions on Emerging Topics in Computational Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7433297\/9965775\/09775710.pdf?arnumber=9775710","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,5]],"date-time":"2024-03-05T19:20:00Z","timestamp":1709666400000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9775710\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12]]},"references-count":35,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tetci.2022.3166555","relation":{},"ISSN":["2471-285X"],"issn-type":[{"value":"2471-285X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,12]]}}}
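
The record above is a standard Crossref "work" message, retrievable from the public Crossref REST API at https://api.crossref.org/works/{DOI}. As a minimal sketch of how one might fetch and read such a record with the Python standard library: the endpoint and the field names (title, author, container-title, page, is-referenced-by-count) are as shown in the record above, while the User-Agent contact address is a placeholder, not a real one.

import json
import urllib.request

# DOI of the work record shown above.
DOI = "10.1109/tetci.2022.3166555"

def fetch_crossref_work(doi: str) -> dict:
    """Fetch a work record from the public Crossref REST API."""
    url = f"https://api.crossref.org/works/{doi}"
    # Crossref asks API clients to identify themselves; the mailto
    # below is a hypothetical placeholder.
    req = urllib.request.Request(
        url, headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"}
    )
    with urllib.request.urlopen(req) as resp:
        body = json.load(resp)
    # A single-work response wraps the record exactly as above.
    assert body["status"] == "ok" and body["message-type"] == "work"
    return body["message"]

if __name__ == "__main__":
    work = fetch_crossref_work(DOI)
    authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
    print(work["title"][0])
    print(authors)
    print(f'{work["container-title"][0]}, vol. {work["volume"]}, no. {work["issue"]}, pp. {work["page"]}')
    print("Cited by:", work.get("is-referenced-by-count", 0))

Run against this DOI, the script would print the article title, the eight authors, the journal citation (vol. 6, no. 6, pp. 1335-1344), and the citation count held in "is-referenced-by-count".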