{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,11]],"date-time":"2026-04-11T13:06:12Z","timestamp":1775912772546,"version":"3.50.1"},"reference-count":51,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Serv. Comput."],
"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tsc.2025.3528346","type":"journal-article","created":{"date-parts":[[2025,1,10]],"date-time":"2025-01-10T20:34:32Z","timestamp":1736541272000},"page":"1039-1053","source":"Crossref","is-referenced-by-count":22,"title":["TF-DDRL: A Transformer-Enhanced Distributed DRL Technique for Scheduling IoT Applications in Edge and Cloud Computing Environments"],"prefix":"10.1109","volume":"18","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9521-5324","authenticated-orcid":false,"given":"Zhiyu","family":"Wang","sequence":"first","affiliation":[{"name":"Quantum Cloud Computing and Distributed Systems (QCLOUDS) Laboratory, School of Computing and Information Systems, The University of Melbourne, Parkville, VIC, Australia"}]},{"given":"Mohammad","family":"Goudarzi","sequence":"additional","affiliation":[{"name":"Faculty of Information Technology, Monash University, Clayton, VIC, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9754-6496","authenticated-orcid":false,"given":"Rajkumar","family":"Buyya","sequence":"additional","affiliation":[{"name":"Quantum Cloud Computing and Distributed Systems (QCLOUDS) Laboratory, School of Computing and Information Systems, The University of Melbourne, Parkville, VIC, Australia"}]}],"member":"263",
"reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3544836"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2022.3155447"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2022.11.005"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3247640"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2022.3158974"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2023.10.012"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3592598"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3243266"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3328333"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3207346"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2021.3073036"},{"key":"ref12","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref13","first-page":"1407","article-title":"IMPALA: Scalable distributed deep-RL with importance weighted actor-learner architectures","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Espeholt"},{"key":"ref14","first-page":"7487","article-title":"Stabilizing transformers for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Parisotto"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref16","article-title":"Prioritized experience replay","author":"Schaul","year":"2015"},{"key":"ref17","article-title":"Distributed prioritized experience replay","author":"Horgan","year":"2018"},{"key":"ref18","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},
{"key":"ref19","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref21","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2022.3218044"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2023.3263538"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.dcan.2018.10.003"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3289611"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3270242"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3266226"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOMWKSHPS54753.2022.9798187"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TETC.2019.2902661"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2020.2986615"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-67540-0_10"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2023.3349177"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1186\/s13677-023-00465-z"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOMWKSHPS54753.2022.9798315"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICC45855.2022.9838831"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3320861"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2022.07.024"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TSIPN.2022.3171336"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3236361"},{
"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2015.7248815"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-05057-3_35"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2020.3042599"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2021.3133547"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2019.8737464"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3181308"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2023.3332308"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1134\/S1064562422060230"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1049\/pbpc027e_ch2"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3460866.3461768"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2021.117901"},{"key":"ref51","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"}],"container-title":["IEEE Transactions on Services Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/4629386\/10964032\/10836729.pdf?arnumber=10836729","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,15]],"date-time":"2025-04-15T17:37:40Z","timestamp":1744738660000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10836729\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":51,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tsc.2025.3528346","relation":{},"ISSN":["1939-1374","2372-0204"],"issn-type":[{"value":"1939-1374","type":"electronic"},{"value":"2372-0204","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}