{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:55:46Z","timestamp":1767077746360,"version":"3.48.0"},"reference-count":15,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSF","award":["CNS 2304863"],"award-info":[{"award-number":["CNS 2304863"]}]},{"name":"NSF","award":["CNS 2339774"],"award-info":[{"award-number":["CNS 2339774"]}]},{"name":"NSF","award":["IIS 2332476"],"award-info":[{"award-number":["IIS 2332476"]}]},{"name":"NSF","award":["ONR N00014-23-1-2505"],"award-info":[{"award-number":["ONR N00014-23-1-2505"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Control Syst. Lett."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/lcsys.2025.3645286","type":"journal-article","created":{"date-parts":[[2025,12,17]],"date-time":"2025-12-17T18:49:25Z","timestamp":1765997365000},"page":"2867-2872","source":"Crossref","is-referenced-by-count":0,"title":["Nash Q-Learning With Inferring Causal Signal Temporal Logic: A Study of Competitive Multi-Agent Systems"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5703-1110","authenticated-orcid":false,"given":"Hadi","family":"Partovi Aria","sequence":"first","affiliation":[{"name":"School for Engineering of Matter, Transport and Energy, Arizona State University, Tempe, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0440-0912","authenticated-orcid":false,"given":"Zhe","family":"Xu","sequence":"additional","affiliation":[{"name":"School for Engineering of Matter, Transport and Energy, Arizona State University, Tempe, AZ, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/tsmcc.2007.913919"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.2307\/1969529"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30206-3_12"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-307-3.50049-6"},{"key":"ref6","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lowe"},{"key":"ref7","first-page":"1039","article-title":"Nash Q-learning for general-sum stochastic games","volume":"4","author":"Hu","year":"2003","journal-title":"J. Mach. Learn. Res."},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1126\/science.aao1733"},{"key":"ref9","article-title":"Causal inference from competing treatments","author":"Stoica","year":"2024","journal-title":"arXiv:2406.03422"},{"key":"ref10","first-page":"232","article-title":"Learning Nash equilibrium for general-sum Markov games from batch data","volume-title":"Proc. 20th Int. Conf. Artif. Intell. Stat.","volume":"54","author":"Perolat"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2016.7799279"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2018.2836867"},{"key":"ref13","first-page":"524","article-title":"Mining causal signal temporal logic formulas for efficient reinforcement learning with temporally extended tasks","volume-title":"Proc. Int. Conf. Neuro-symbolic Systems","volume":"288","author":"Aria"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1111\/tops.12143"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.128170"}],"container-title":["IEEE Control Systems Letters"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/7782633\/10939047\/11303123-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7782633\/10939047\/11303123.pdf?arnumber=11303123","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:51:29Z","timestamp":1767077489000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11303123\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1109\/lcsys.2025.3645286","relation":{},"ISSN":["2475-1456"],"issn-type":[{"type":"electronic","value":"2475-1456"}],"subject":[],"published":{"date-parts":[[2025]]}}}