{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T18:38:48Z","timestamp":1770748728497,"version":"3.50.0"},"reference-count":49,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100002914","name":"Concordia University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002914","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228849","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-9","source":"Crossref","is-referenced-by-count":1,"title":["Position Paper: Emergent Machina Sapiens Urge Rethinking Multi-Agent Paradigms in Critical Infrastructures"],"prefix":"10.1109","author":[{"given":"Hepeng","family":"Li","sequence":"first","affiliation":[{"name":"University of Maine,Department of Electrical &#x0026; Computer Engineering,Orono,ME,USA"}]},{"given":"Yuhong","family":"Liu","sequence":"additional","affiliation":[{"name":"Santa Clara University,Department of Computer Science &#x0026; Engineering,Santa Clara,CA,USA"}]},{"given":"Jun","family":"Yan","sequence":"additional","affiliation":[{"name":"Concordia University,Concordia Institute for Information Systems Engineering,Montr&#x00E9;al,Canada"}]},{"given":"Jie","family":"Gao","sequence":"additional","affiliation":[{"name":"Delft University of Technology,Department of Transport &#x0026; Planning,Delft,Netherlands"}]},{"given":"Xiao\u2019ou","family":"Yang","sequence":"additional","affiliation":[{"name":"Santa Clara University,Department of Computer Science &#x0026; Engineering,Santa Clara,CA,USA"}]},{"given":"Mohamed","family":"Naili","sequence":"additional","affiliation":[{"name":"Santa Clara University,Department of Computer Science &#x0026; Engineering,Santa Clara,CA,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref3","article-title":"Practices for governing agentic ai systems","author":"Shavit","year":"2023","journal-title":"Research Paper, OpenAI"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3593013.3594033"},{"key":"ref5","article-title":"Reflections from the turing award winners","author":"LeCun","year":"2020"},{"key":"ref6","first-page":"22 243","article-title":"Big self-supervised models are strong semi-supervised learners","volume":"33","author":"Chen","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref7","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proceedings of the 34th International Conference on Machine Learning","volume":"70","author":"Finn"},{"key":"ref8","article-title":"Meta-learning with implicit gradients","volume-title":"Advances in Neural Information Processing Systems","volume":"32","author":"Rajeswaran","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2013.11.001"},{"key":"ref10","article-title":"Reinforcement learning benchmarks for traffic signal control","volume-title":"Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1)","author":"Ault"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2018.2879572"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2022.3154718"},{"key":"ref13","first-page":"651","article-title":"Scalable deep reinforcement learning for vision-based robotic manipulation","volume-title":"Proceedings of The 2nd Conference on Robot Learning","volume":"87","author":"Kalashnikov"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i27.35095"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.procir.2020.05.163"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.rcim.2022.102412"},{"key":"ref17","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref18","first-page":"487","volume-title":"Multi-Agent Reinforcement Learning: Independent vs. Cooperative Agents","author":"Tan","year":"1997"},{"key":"ref19","first-page":"746","article-title":"The dynamics of reinforcement learning in cooperative multiagent systems","volume-title":"Proceedings of the Fifteenth National\/Tenth Conference on Artificial Intelligence\/Innovative Applications of Artificial Intelligence","author":"Claus"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2013.2241057"},{"key":"ref21","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"the 31st Conference on Neural Information Processing Systems","author":"Lowe"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60990-0_12"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref24","first-page":"32 059","article-title":"A game-theoretic framework for managing risk in multi-agent systems","volume-title":"Proceedings of the 40th International Conference on Machine Learning","volume":"202","author":"Slumbers"},{"key":"ref25","article-title":"Alympics: Llm agents meet game theory\u2013exploring strategic decision-making with ai agents","author":"Mao","year":"2024"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/mnet.2024.3521887"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1162\/daed_a_01904"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1017\/S0269888900008122"},{"key":"ref29","article-title":"Agent ai: Surveying the horizons of multimodal interaction","author":"Durante","year":"2024"},{"key":"ref30","article-title":"Aflow: Automating agentic workflow generation","author":"Zhang","year":"2024"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26313"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/MUE.2007.158"},{"key":"ref33","article-title":"A survey on context-aware multi-agent systems: Techniques, challenges and future directions","author":"Du","year":"2024"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.jclepro.2022.134047"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-10097-x"},{"key":"ref36","article-title":"A multi-agent reinforcement learning model of reputation and cooperation in human groups","author":"McKee","year":"2021"},{"key":"ref37","article-title":"Learning multiagent communication with backpropagation","volume":"29","author":"Sukhbaatar","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref38","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems","author":"Lowe"},{"key":"ref39","article-title":"Responsible emergent multi-agent behavior","author":"Grupen","year":"2023"},{"key":"ref40","article-title":"Safe, multi-agent, reinforcement learning for autonomous driving","author":"Shalev-Shwartz","year":"2016"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207663"},{"key":"ref42","doi-asserted-by":"crossref","DOI":"10.24963\/ijcai.2019\/832","article-title":"Failure-scenario maker for rule-based agent using multi-agent adversarial reinforcement learning and its application to autonomous driving","author":"Wachi","year":"2019"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2022.3169907"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref45","first-page":"7730","article-title":"Waymax: An accelerated, data-driven simulator for large-scale autonomous driving research","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Gulino","year":"2023"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1016\/j.rser.2021.110797"},{"key":"ref47","first-page":"18","article-title":"Machina-economicus or homo-complexicus: Artificial intelligence and the future of economics?","volume":"93","author":"Daneke","year":"2020","journal-title":"Real-World Economic Review"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2025.125364"},{"key":"ref49","article-title":"Position: Beyond personhood: Agency, accountability, and the limits of anthropomorphic ethical analysis","volume-title":"Forty-first International Conference on Machine Learning","author":"Dai"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","location":"Rome, Italy","start":{"date-parts":[[2025,6,30]]},"end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228849.pdf?arnumber=11228849","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:12:39Z","timestamp":1763190759000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228849\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228849","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}