{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,27]],"date-time":"2024-09-27T04:15:30Z","timestamp":1727410530661},"reference-count":14,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China (NSFC)","doi-asserted-by":"publisher","award":["61831008,62027802,62301185"],"award-info":[{"award-number":["61831008,62027802,62301185"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007162","name":"Guangdong Science and Technology Planning Project","doi-asserted-by":"publisher","award":["2021A1515110071"],"award-info":[{"award-number":["2021A1515110071"]}],"id":[{"id":"10.13039\/501100007162","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,24]]},"DOI":"10.1109\/vtc2024-spring62846.2024.10683035","type":"proceedings-article","created":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T17:28:12Z","timestamp":1727285292000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Multi-Scenario Task Scheduling Based on Heterogeneous-Agent Reinforcement Learning in Space-Air-Ground Integrated Network"],"prefix":"10.1109","author":[{"given":"Kexin","family":"Fan","sequence":"first","affiliation":[{"name":"Harbin Institute of Technology,Guangdong Provincial Key Laboratory of Aerospace Communication and Networking Technology,Shenzhen,China"}]},{"given":"Bowen","family":"Feng","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Guangdong Provincial Key Laboratory of Aerospace Communication and Networking Technology,Shenzhen,China"}]},{"given":"Junyi","family":"Yang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Guangdong Provincial Key Laboratory of Aerospace Communication and Networking Technology,Shenzhen,China"}]},{"given":"Zhikai","family":"Zhang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Guangdong Provincial Key Laboratory of Aerospace Communication and Networking Technology,Shenzhen,China"}]},{"given":"Qinyu","family":"Zhang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Guangdong Provincial Key Laboratory of Aerospace Communication and Networking Technology,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2018.2841996"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2022.3175472"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3058236"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.001.00254"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TGCN.2022.3186792"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3071531"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNSE.2021.3130251"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2022.3153316"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2022.3186997"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2022.3190562"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3233667"},{"key":"ref12","article-title":"Trust region policy optimisation in multi-agent reinforcement learning","author":"Kuba","year":"2021","journal-title":"arXiv"},{"key":"ref13","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"Schulman","year":"2015","journal-title":"arXiv"},{"key":"ref14","article-title":"Heterogeneous-agent reinforcement learning","author":"Zhong","year":"2023","journal-title":"arXiv"}],"event":{"name":"2024 IEEE 99th Vehicular Technology Conference (VTC2024-Spring)","start":{"date-parts":[[2024,6,24]]},"location":"Singapore, Singapore","end":{"date-parts":[[2024,6,27]]}},"container-title":["2024 IEEE 99th Vehicular Technology Conference (VTC2024-Spring)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10682988\/10682819\/10683035.pdf?arnumber=10683035","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,26]],"date-time":"2024-09-26T05:59:29Z","timestamp":1727330369000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10683035\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,24]]},"references-count":14,"URL":"https:\/\/doi.org\/10.1109\/vtc2024-spring62846.2024.10683035","relation":{},"subject":[],"published":{"date-parts":[[2024,6,24]]}}}