{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,2]],"date-time":"2025-11-02T16:58:22Z","timestamp":1762102702534,"version":"3.37.3"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100016047","name":"Science Fund of the Republic of Serbia","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100016047","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001871","name":"Funda\u00e7\u00e3o para a Ci\u00eancia e a Tecnologia","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001871","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,14]]},"DOI":"10.1109\/cdc45484.2021.9683607","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T20:50:18Z","timestamp":1643748618000},"page":"5976-5981","source":"Crossref","is-referenced-by-count":4,"title":["Distributed Consensus-Based Multi-Agent Off-Policy Temporal-Difference Learning"],"prefix":"10.1109","author":[{"given":"Milos S.","family":"Stankovic","sequence":"first","affiliation":[]},{"given":"Marko","family":"Beko","sequence":"additional","affiliation":[]},{"given":"Srdjan S.","family":"Stankovic","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"Stochastic Approximation and Recursive Algorithms and Applications","year":"2003","author":"kushner","key":"ref33"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2010.2076530"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.2174\/156720181901220120094538"},{"key":"ref30","first-page":"1","article-title":"Weak convergence properties of constrained emphatic temporal-difference learning with constant and slowly diminishing stepsize","volume":"17","author":"yu","year":"2016","journal-title":"Journal of Machine Learning Research"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638519"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2545098"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2012.01.007"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TCNS.2016.2633788"},{"journal-title":"Reinforcement Learning An Introduction","year":"2017","author":"sutton","key":"ref13"},{"journal-title":"Neuro-Dynamic Programming","year":"1996","author":"bertsekas","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553501"},{"key":"ref16","first-page":"289","article-title":"Off-policy learning with eligibility traces: A survey","volume":"15","author":"geist","year":"2014","journal-title":"Journal of Machine Learning Research"},{"journal-title":"On convergence of some gradient-based temporal-differences algorithms for off-policy learning","year":"2017","author":"yu","key":"ref17"},{"key":"ref18","first-page":"1125","article-title":"SBEED: Convergent reinforcement learning with nonlinear function approximation","volume":"80","author":"dai","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning"},{"key":"ref19","first-page":"321","author":"zhang","year":"2021","journal-title":"Multi-agent reinforcement learning A selective overview of theories and algorithms"},{"key":"ref28","first-page":"1","article-title":"An emphatic approach to the problem of off-policy temporal-difference learning","volume":"17","author":"sutton","year":"2016","journal-title":"Journal of Machine Learning Research"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1986.1104412"},{"key":"ref27","first-page":"1626","article-title":"Finite-time analysis of distributed TD(0) with linear function approximation on multi-agent reinforcement learning","author":"doan","year":"2019","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2015.07.018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2018.03.054"},{"key":"ref29","first-page":"1724","article-title":"On convergence of emphatic temporal-difference learning","volume":"40","author":"yu","year":"2015","journal-title":"Proc 28th Conference on Learning Theory"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1137\/0325070"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2013.2275131"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"601","DOI":"10.1109\/TAC.2014.2364096","article-title":"Distributed optimization over time-varying directed graphs","volume":"60","author":"nedi\u0107","year":"2015","journal-title":"IEEE Trans Autom Control"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCNS.2021.3061909"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2012.2217338"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2008.2009583"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2585302"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2014.2368731"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2013.2241057"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CDC40024.2019.9029969"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2018.8619839"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2016.7524910"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.23919\/ECC.2019.8795670"}],"event":{"name":"2021 60th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2021,12,14]]},"location":"Austin, TX, USA","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 60th IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9682670\/9682776\/09683607.pdf?arnumber=9683607","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:58:11Z","timestamp":1652201891000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9683607\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,14]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/cdc45484.2021.9683607","relation":{},"subject":[],"published":{"date-parts":[[2021,12,14]]}}}