{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,17]],"date-time":"2026-01-17T10:37:45Z","timestamp":1768646265000,"version":"3.49.0"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276097"],"award-info":[{"award-number":["62276097"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Program of the National Natural Science Foundation of China","award":["62136003"],"award-info":[{"award-number":["62136003"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. Syst."],
"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1109\/tcds.2025.3533744","type":"journal-article","created":{"date-parts":[[2025,1,27]],"date-time":"2025-01-27T13:48:21Z","timestamp":1737985701000},"page":"976-986","source":"Crossref","is-referenced-by-count":3,"title":["MAST: Multiagent Safe Transformer for Reinforcement Learning"],"prefix":"10.1109","volume":"17","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-6667-7222","authenticated-orcid":false,"given":"Suhang","family":"Wei","sequence":"first","affiliation":[{"name":"Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-7520-2522","authenticated-orcid":false,"given":"Xianwei","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6083-3440","authenticated-orcid":false,"given":"Xiang","family":"Feng","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1899-1135","authenticated-orcid":false,"given":"Huiqun","family":"Yu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, East China University of Science and Technology, Shanghai, China"}]}],
"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-022-04105-y"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2022.120111"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3150151"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812263"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s43684-022-00023-5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2023.3318070"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.seta.2023.103363"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1002\/widm.1487"},{"key":"ref9","first-page":"22","article-title":"Constrained policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Achiam","year":"2017"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i8.20855"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2023.123618"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2015.2444131"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.23919\/CCC52363.2021.9549282"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2021.3138546"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-042920-020211"},{"key":"ref16","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman","year":"2015"},{"key":"ref17","article-title":"Projection-based constrained policy optimization","volume-title":"Proc. 8th Int. Conf. Learn. Represent (ICLR)","author":"Yang","year":"2020"},{"key":"ref18","first-page":"9111","article-title":"Constrained update projection approach to safe policy optimization","volume":"35","author":"Yang","year":"2022","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},
{"key":"ref19","first-page":"18964","article-title":"Safety gymnasium: A unified safe reinforcement learning benchmark","volume":"36","author":"Ji","year":"2023","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2022.07.111"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-86486-6_10"},{"key":"ref22","article-title":"Learning safe multi-agent control with decentralized neural barrier certificates","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Qin","year":"2021"},{"key":"ref23","first-page":"10757","article-title":"Multi-agent determinantal Q-learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yang","year":"2020"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref25","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume":"30","author":"Lowe","year":"2017","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref26","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative multi-agent games","volume":"35","author":"Yu","year":"2022","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref27","first-page":"1046","article-title":"Trust region policy optimisation in multi-agent reinforcement learning","volume-title":"Proc. 10th Int. Conf. Learn. Represent. (ICLR)","author":"Kuba","year":"2022"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2023.103905"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i10.17062"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref31","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume":"34","author":"Chen","year":"2021","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},
{"key":"ref32","first-page":"18532","article-title":"Elastic decision transformer","volume":"36","author":"Wu","year":"2023","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12233"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.14569\/IJACSA.2023.01401103"},{"key":"ref35","first-page":"13458","article-title":"Settling the variance of multi-agent policy gradients","volume":"34","author":"Kuba","year":"2021","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511804441"},{"key":"ref37","first-page":"11809","article-title":"Tree of thoughts: Deliberate problem solving with large language models","volume":"36","author":"Yao","year":"2023","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref38","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref39","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ramesh","year":"2021"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.icte.2023.07.001"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i12.17300"},{"key":"ref42","first-page":"12208","article-title":"FACMAC: Factored multi-agent centralised policy gradients","volume":"34","author":"Peng","year":"2021","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},
{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CBMS52027.2021.00056"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7274989\/11118941\/10854886.pdf?arnumber=10854886","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,16]],"date-time":"2025-12-16T18:33:03Z","timestamp":1765909983000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10854886\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8]]},"references-count":43,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2025.3533744","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"value":"2379-8920","type":"print"},{"value":"2379-8939","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,8]]}}}