{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,4]],"date-time":"2025-09-04T14:28:42Z","timestamp":1756996122608,"version":"3.28.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,14]]},"DOI":"10.1109\/cdc45484.2021.9683740","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T20:50:18Z","timestamp":1643748618000},"page":"43-48","source":"Crossref","is-referenced-by-count":1,"title":["Imitation Learning From Inconcurrent Multi-Agent Interactions"],"prefix":"10.1109","author":[{"given":"Xin","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Weixiao","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Yanhua","family":"Li","sequence":"additional","affiliation":[]},{"given":"Renjie","family":"Liao","sequence":"additional","affiliation":[]},{"given":"Ziming","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref31","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume":"8","author":"ziebart","year":"2008","journal-title":"AAAI"},{"key":"ref30","article-title":"Modeling interaction via the principle of maximum causal entropy","author":"ziebart","year":"2010","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/2432622.2432623"},{"key":"ref11","first-page":"4565","article-title":"Generative adversarial imitation learning","author":"ho","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/INFCOM.2004.1354485"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref15","first-page":"310","article-title":"A generalized reinforcement-learning model: Convergence and applications","volume":"96","author":"littman","year":"1996","journal-title":"ICML"},{"key":"ref16","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref17","first-page":"2","article-title":"Algorithms for inverse reinforcement learning","volume":"1","author":"ng","year":"2000","journal-title":"ICML"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.88"},{"key":"ref19","first-page":"661","article-title":"Efficient reductions for imitation learning","author":"ross","year":"2010","journal-title":"Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics"},{"article-title":"Multi-agent adversarial inverse reinforcement learning","year":"2019","author":"yu","key":"ref28"},{"key":"ref4","first-page":"12","article-title":"Robot learning from demonstration","volume":"97","author":"atkeson","year":"1997","journal-title":"ICML"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.jet.2015.12.001"},{"article-title":"Concrete problems in ai safety","year":"2016","author":"amodei","key":"ref3"},{"key":"ref6","article-title":"Openai baselines","author":"dhariwal","year":"2017","journal-title":"Github"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2019.00194"},{"key":"ref5","first-page":"26","article-title":"On nash equilibria in stochastic games","author":"chatterjee","year":"2004","journal-title":"International Workshop on Computer Science Logic"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/0022-0531(83)90076-5"},{"journal-title":"Competitive Markov Decision Processes","year":"2012","author":"filar","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1017\/S0960129515000365"},{"key":"ref9","first-page":"6765","article-title":"Inverse reward design","author":"hadfield-menell","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref20","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref22","first-page":"301","article-title":"Spieltheoretische behandlung eines oligopolmodells mit nachfragetr\u00e4gheit: Teil i: Bestimmung des dynamischen preisgleichgewichts","volume":"h 2","author":"selten","year":"1965","journal-title":"Journal of Institutional and Theoretical Economics Zeitschrift fur Die Gesamte Staatswissenschaft"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2015","author":"schulman","key":"ref21"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511811654"},{"key":"ref26","first-page":"5279","article-title":"Scalable trust-region method for deep reinforcement learning using kronecker-factored approximation","author":"wu","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref25","first-page":"7461","article-title":"Multi-agent generative adversarial imitation learning","author":"song","year":"2018","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2021 60th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2021,12,14]]},"location":"Austin, TX, USA","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 60th IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9682670\/9682776\/09683740.pdf?arnumber=9683740","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:58:03Z","timestamp":1652201883000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9683740\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,14]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/cdc45484.2021.9683740","relation":{},"subject":[],"published":{"date-parts":[[2021,12,14]]}}}