{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,11]],"date-time":"2026-01-11T01:01:45Z","timestamp":1768093305798,"version":"3.49.0"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T00:00:00Z","timestamp":1754438400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T00:00:00Z","timestamp":1754438400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Syst Sci Complex"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1007\/s11424-025-4426-7","type":"journal-article","created":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T10:40:16Z","timestamp":1754476816000},"page":"1853-1886","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Distributed Policy Gradient with Variance Reduction in Multi-Agent Reinforcement Learning"],"prefix":"10.1007","volume":"38","author":[{"given":"Xiaoxiao","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Jinlong","family":"Lei","sequence":"additional","affiliation":[]},{"given":"Li","family":"Li","sequence":"additional","affiliation":[]},{"given":"Lucian","family":"Busoniu","sequence":"additional","affiliation":[]},{"given":"Jia","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,6]]},"reference":[{"key":"4426_CR1","volume-title":"Reinforcement Learning: An Introduction","author":"R Sutton","year":"2018","unstructured":"Sutton R and Barto A, Reinforcement Learning: An Introduction, MIT Press, Cambridge, 2018."},{"key":"4426_CR2","volume-title":"Reinforcement Learning and Optimal Control","author":"D Bertsekas","year":"2019","unstructured":"Bertsekas D, Reinforcement Learning and Optimal Control, Athena Scientific, Nashua, NH, 2019."},{"issue":"6","key":"4426_CR3","doi-asserted-by":"publisher","first-page":"2325","DOI":"10.1007\/s11424-023-2337-z","volume":"36","author":"S Duan","year":"2023","unstructured":"Duan S, Yu Z, Jiang H, et al., Fixed-time cluster consensus for multi-agent systems with objective optimization on directed networks, Journal of Systems Science & Complexity, 2023, 36(6): 2325\u20132343.","journal-title":"Journal of Systems Science & Complexity"},{"issue":"4","key":"4426_CR4","doi-asserted-by":"publisher","first-page":"1470","DOI":"10.1007\/s11424-024-2149-9","volume":"37","author":"H Hu","year":"2024","unstructured":"Hu H, Mo L, and Cao X, Distributed heterogeneous multi-agent optimization with stochastic sub-gradient, Journal of Systems Science & Complexity, 2024, 37(4): 1470\u20131487.","journal-title":"Journal of Systems Science & Complexity"},{"issue":"2","key":"4426_CR5","doi-asserted-by":"publisher","first-page":"156","DOI":"10.1109\/TSMCC.2007.913919","volume":"38","author":"L Busoniu","year":"2008","unstructured":"Busoniu L, Babuska R, and De Schutter B, A comprehensive survey of multiagent reinforcement learning, IEEE Transactions on Systems, Man & Cybernetics: Part C, 2008, 38(2): 156\u2013172.","journal-title":"IEEE Transactions on Systems, Man & Cybernetics: Part 
C"},{"key":"4426_CR6","doi-asserted-by":"publisher","first-page":"1774","DOI":"10.1145\/3219819.3219993","volume-title":"Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD)","author":"K Lin","year":"2018","unstructured":"Lin K, Zhao R, Xu Z, et al., Efficient large-scale fleet management via multi-agent deep reinforcement learning, Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD), 2018, 1774\u20131783."},{"issue":"4","key":"4426_CR7","doi-asserted-by":"publisher","first-page":"7011","DOI":"10.1109\/JIOT.2019.2913162","volume":"6","author":"J Chen","year":"2019","unstructured":"Chen J, Chen S, Wang Q, et al., iRAF: A deep reinforcement learning approach for collaborative mobile edge computing IoT networks, IEEE Internet of Things Journal, 2019, 6(4): 7011\u20137024.","journal-title":"IEEE Internet of Things Journal"},{"issue":"6","key":"4426_CR8","doi-asserted-by":"publisher","first-page":"743","DOI":"10.1016\/j.isatra.2012.06.010","volume":"51","author":"F Li","year":"2012","unstructured":"Li F, Wu M, He Y, et al., Optimal control in microgrid using multi-agent reinforcement learning, ISA Transactions, 2012, 51(6): 743\u2013751.","journal-title":"ISA Transactions"},{"issue":"7","key":"4426_CR9","doi-asserted-by":"publisher","first-page":"6943","DOI":"10.1109\/TITS.2023.3254147","volume":"24","author":"J Xin","year":"2023","unstructured":"Xin J, Wu X, D\u2019Ariano A, et al. Model predictive path planning of AGVs: Mixed logical dynamical formulation and distributed coordination, IEEE Transactions on Intelligent Transportation Systems, 2023, 24(7): 6943\u20136954.","journal-title":"IEEE Transactions on Intelligent Transportation Systems"},{"key":"4426_CR10","doi-asserted-by":"publisher","first-page":"321","DOI":"10.1007\/978-3-030-60990-0_12","volume-title":"Handbook of Reinforcement Learning & Control","author":"K Zhang","year":"2021","unstructured":"Zhang K, Yang Z, and Basar T, Multi-agent reinforcement learning: A selective overview of theories and algorithms, Handbook of Reinforcement Learning & Control, 2021, 321\u2013384."},{"key":"4426_CR11","first-page":"1057","volume-title":"Proceedings of the 13th Annual Conference on Neural Information Processing Systems (NIPS)","author":"R Sutton","year":"1999","unstructured":"Sutton R, McAllester D, Singh S, et al., Policy gradient methods for reinforcement learning with function approximation, Proceedings of the 13th Annual Conference on Neural Information Processing Systems (NIPS), 1999, 1057\u20131063."},{"issue":"3\u20134","key":"4426_CR12","doi-asserted-by":"publisher","first-page":"229","DOI":"10.1023\/A:1022672621406","volume":"8","author":"R Williams","year":"1992","unstructured":"Williams R, Simple statistical gradient-following algorithms for connectionist reinforcement learning, Machine Learning, 1992, 8(3\u20134): 229\u2013256.","journal-title":"Machine Learning"},{"key":"4426_CR13","doi-asserted-by":"publisher","first-page":"319","DOI":"10.1613\/jair.806","volume":"15","author":"J Baxter","year":"2001","unstructured":"Baxter J and Bartlett P, Infinite-horizon policy-gradient estimation, Journal of Artificial Intelligence Research, 2001, 15: 319\u2013350.","journal-title":"Journal of Artificial Intelligence Research"},{"key":"4426_CR14","first-page":"157","volume-title":"Proceedings of the 11th International Conference on Machine Learning (ICML)","author":"M Littman","year":"1994","unstructured":"Littman M, Markov games as a 
      {"key": "4426_CR14", "first-page": "157", "volume-title": "Proceedings of the 11th International Conference on Machine Learning (ICML)", "author": "M Littman", "year": "1994", "unstructured": "Littman M, Markov games as a framework for multi-agent reinforcement learning, Proceedings of the 11th International Conference on Machine Learning (ICML), 1994, 157–163."},
      {"key": "4426_CR15", "first-page": "1039", "volume": "4", "author": "J Hu", "year": "2003", "unstructured": "Hu J and Wellman M, Nash Q-learning for general-sum stochastic games, Journal of Machine Learning Research, 2003, 4: 1039–1069.", "journal-title": "Journal of Machine Learning Research"},
      {"key": "4426_CR16", "first-page": "242", "volume-title": "Proceedings of the 20th International Conference on Machine Learning (ICML)", "author": "A Greenwald", "year": "2003", "unstructured": "Greenwald A, Hall K, Serrano R, et al., Correlated Q-learning, Proceedings of the 20th International Conference on Machine Learning (ICML), 2003, 242–249."},
      {"issue": "6", "key": "4426_CR17", "doi-asserted-by": "publisher", "first-page": "750", "DOI": "10.1007/s10458-019-09421-1", "volume": "33", "author": "P Hernandez-Leal", "year": "2019", "unstructured": "Hernandez-Leal P, Kartal B, and Taylor M, A survey and critique of multiagent deep reinforcement learning, Autonomous Agents & Multi-Agent Systems, 2019, 33(6): 750–797.", "journal-title": "Autonomous Agents & Multi-Agent Systems"},
      {"issue": "9", "key": "4426_CR18", "doi-asserted-by": "publisher", "first-page": "3826", "DOI": "10.1109/TCYB.2020.2977374", "volume": "50", "author": "T Nguyen", "year": "2020", "unstructured": "Nguyen T, Nguyen N, and Nahavandi S, Deep reinforcement learning for multi-agent systems: A review of challenges, solutions, and applications, IEEE Transactions on Cybernetics, 2020, 50(9): 3826–3839.", "journal-title": "IEEE Transactions on Cybernetics"},
      {"key": "4426_CR19", "first-page": "2137", "volume-title": "Proceedings of the 30th Annual Conference on Neural Information Processing Systems (NIPS)", "author": "J Foerster", "year": "2016", "unstructured": "Foerster J, Assael I, De Freitas N, et al., Learning to communicate with deep multi-agent reinforcement learning, Proceedings of the 30th Annual Conference on Neural Information Processing Systems (NIPS), 2016, 2137–2145."},
      {"key": "4426_CR20", "first-page": "6379", "volume-title": "Proceedings of the 31st Annual Conference on Neural Information Processing Systems (NIPS)", "author": "R Lowe", "year": "2017", "unstructured": "Lowe R, Wu Y, Tamar A, et al., Multi-agent actor-critic for mixed cooperative-competitive environments, Proceedings of the 31st Annual Conference on Neural Information Processing Systems (NIPS), 2017, 6379–6390."},
      {"key": "4426_CR21", "first-page": "1942", "volume-title": "Proceedings of the 36th International Conference on Machine Learning (ICML)", "author": "J Foerster", "year": "2019", "unstructured": "Foerster J, Song F, Hughes E, et al., Bayesian action decoder for deep multi-agent reinforcement learning, Proceedings of the 36th International Conference on Machine Learning (ICML), 2019, 1942–1951."},
      {"issue": "7782", "key": "4426_CR22", "doi-asserted-by": "publisher", "first-page": "350", "DOI": "10.1038/s41586-019-1724-z", "volume": "575", "author": "O Vinyals", "year": "2019", "unstructured": "Vinyals O, Babuschkin I, Czarnecki W, et al., Grandmaster level in StarCraft II using multi-agent reinforcement learning, Nature, 2019, 575(7782): 350–354.", "journal-title": "Nature"},
      {"issue": "3", "key": "4426_CR23", "doi-asserted-by": "publisher", "first-page": "123", "DOI": "10.1109/MSP.2020.2976000", "volume": "37", "author": "D Lee", "year": "2020", "unstructured": "Lee D, He N, Kamalaruban P, et al., Optimization for reinforcement learning: From a single agent to cooperative agents, IEEE Signal Processing Magazine, 2020, 37(3): 123–135.", "journal-title": "IEEE Signal Processing Magazine"},
      {"issue": "5", "key": "4426_CR24", "doi-asserted-by": "publisher", "first-page": "1260", "DOI": "10.1109/TAC.2014.2368731", "volume": "60", "author": "S Macua", "year": "2014", "unstructured": "Macua S, Chen J, Zazo S, et al., Distributed policy evaluation under multiple behavior strategies, IEEE Transactions on Automatic Control, 2014, 60(5): 1260–1274.", "journal-title": "IEEE Transactions on Automatic Control"},
      {"key": "4426_CR25", "first-page": "1967", "volume-title": "Proceedings of the 57th IEEE Conference on Decision & Control (CDC)", "author": "D Lee", "year": "2018", "unstructured": "Lee D, Yoon H, and Hovakimyan N, Primal-dual algorithm for distributed reinforcement learning: Distributed GTD, Proceedings of the 57th IEEE Conference on Decision & Control (CDC), 2018, 1967–1972."},
      {"key": "4426_CR26", "first-page": "9649", "volume-title": "Proceedings of the 32nd Annual Conference on Neural Information Processing Systems (NeurIPS)", "author": "H Wai", "year": "2018", "unstructured": "Wai H, Yang Z, Wang Z, et al., Multi-agent reinforcement learning via double averaging primal-dual optimization, Proceedings of the 32nd Annual Conference on Neural Information Processing Systems (NeurIPS), 2018, 9649–9660."},
      {"key": "4426_CR27", "first-page": "1626", "volume-title": "Proceedings of the 36th International Conference on Machine Learning (ICML)", "author": "T Doan", "year": "2019", "unstructured": "Doan T, Maguluri S, and Romberg J, Finite-time analysis of distributed TD(0) with linear function approximation on multi-agent reinforcement learning, Proceedings of the 36th International Conference on Machine Learning (ICML), 2019, 1626–1635."},
      {"issue": "4", "key": "4426_CR28", "doi-asserted-by": "publisher", "first-page": "1497", "DOI": "10.1109/TAC.2020.2995814", "volume": "66", "author": "L Cassano", "year": "2021", "unstructured": "Cassano L, Yuan K, and Sayed A, Multiagent fully decentralized value function learning with linear convergence rates, IEEE Transactions on Automatic Control, 2021, 66(4): 1497–1512.", "journal-title": "IEEE Transactions on Automatic Control"},
      {"key": "4426_CR29", "first-page": "13762", "volume-title": "Proceedings of the 37th International Conference on Machine Learning (ICML)", "author": "G Wang", "year": "2020", "unstructured": "Wang G, Lu S, Giannakis G, et al., Decentralized TD tracking with linear function approximation and its finite-time analysis, Proceedings of the 37th International Conference on Machine Learning (ICML), 2020, 13762–13772."},
      {"key": "4426_CR30", "first-page": "1008", "volume-title": "Proceedings of the 13th Annual Conference on Neural Information Processing Systems (NIPS)", "author": "V Konda", "year": "1999", "unstructured": "Konda V and Tsitsiklis J, Actor-critic algorithms, Proceedings of the 13th Annual Conference on Neural Information Processing Systems (NIPS), 1999, 1008–1014."},
      {"issue": "11", "key": "4426_CR31", "doi-asserted-by": "publisher", "first-page": "2471", "DOI": "10.1016/j.automatica.2009.07.008", "volume": "45", "author": "S Bhatnagar", "year": "2009", "unstructured": "Bhatnagar S, Sutton R, Ghavamzadeh M, et al., Natural actor-critic algorithms, Automatica, 2009, 45(11): 2471–2482.", "journal-title": "Automatica"},
      {"issue": "1–2", "key": "4426_CR32", "doi-asserted-by": "publisher", "first-page": "83", "DOI": "10.1007/s10107-016-1030-6", "volume": "162", "author": "M Schmidt", "year": "2017", "unstructured": "Schmidt M, Le Roux N, and Bach F, Minimizing finite sums with the stochastic average gradient, Mathematical Programming, 2017, 162(1–2): 83–112.", "journal-title": "Mathematical Programming"},
      {"key": "4426_CR33", "first-page": "1646", "volume-title": "Proceedings of the 28th Annual Conference on Neural Information Processing Systems (NIPS)", "author": "A Defazio", "year": "2014", "unstructured": "Defazio A, Bach F, and Lacoste-Julien S, SAGA: A fast incremental gradient method with support for non-strongly convex composite objectives, Proceedings of the 28th Annual Conference on Neural Information Processing Systems (NIPS), 2014, 1646–1654."},
      {"key": "4426_CR34", "first-page": "315", "volume-title": "Proceedings of the 27th Annual Conference on Neural Information Processing Systems (NIPS)", "author": "R Johnson", "year": "2013", "unstructured": "Johnson R and Zhang T, Accelerating stochastic gradient descent using predictive variance reduction, Proceedings of the 27th Annual Conference on Neural Information Processing Systems (NIPS), 2013, 315–323."},
      {"key": "4426_CR35", "first-page": "2613", "volume-title": "Proceedings of the 34th International Conference on Machine Learning (ICML)", "author": "L Nguyen", "year": "2017", "unstructured": "Nguyen L, Liu J, Scheinberg K, et al., SARAH: A novel method for machine learning problems using stochastic recursive gradient, Proceedings of the 34th International Conference on Machine Learning (ICML), 2017, 2613–2621."},
      {"key": "4426_CR36", "first-page": "699", "volume-title": "Proceedings of the 33rd International Conference on Machine Learning (ICML)", "author": "Z Allen-Zhu", "year": "2016", "unstructured": "Allen-Zhu Z and Hazan E, Variance reduction for faster non-convex optimization, Proceedings of the 33rd International Conference on Machine Learning (ICML), 2016, 699–707."},
      {"key": "4426_CR37", "first-page": "314", "volume-title": "Proceedings of the 33rd International Conference on Machine Learning (ICML)", "author": "S Reddi", "year": "2016", "unstructured": "Reddi S, Hefny A, Sra S, et al., Stochastic variance reduction for nonconvex optimization, Proceedings of the 33rd International Conference on Machine Learning (ICML), 2016, 314–323."},
      {"key": "4426_CR38", "first-page": "1394", "volume-title": "Proceedings of the 32nd Annual Conference on Learning Theory (COLT)", "author": "R Ge", "year": "2019", "unstructured": "Ge R, Li Z, Wang W, et al., Stabilized SVRG: Simple variance reduction for nonconvex optimization, Proceedings of the 32nd Annual Conference on Learning Theory (COLT), 2019, 1394–1448."},
      {"key": "4426_CR39", "first-page": "4026", "volume-title": "Proceedings of the 35th International Conference on Machine Learning (ICML)", "author": "M Papini", "year": "2018", "unstructured": "Papini M, Binaghi D, Canonaco G, et al., Stochastic variance-reduced policy gradient, Proceedings of the 35th International Conference on Machine Learning (ICML), 2018, 4026–4035."},
      {"key": "4426_CR40", "first-page": "541", "volume-title": "Proceedings of the Conference on Uncertainty in Artificial Intelligence (UAI)", "author": "P Xu", "year": "2020", "unstructured": "Xu P, Gao F, and Gu Q, An improved convergence analysis of stochastic variance-reduced policy gradient, Proceedings of the Conference on Uncertainty in Artificial Intelligence (UAI), 2020, 541–551."},
      {"key": "4426_CR41", "first-page": "1", "volume-title": "Proceedings of the International Conference on Learning Representations (ICLR)", "author": "P Xu", "year": "2020", "unstructured": "Xu P, Gao F, and Gu Q, Sample efficient policy gradient methods with recursive variance reduction, Proceedings of the International Conference on Learning Representations (ICLR), 2020, 1–22."},
      {"issue": "61", "key": "4426_CR42", "first-page": "1", "volume": "17", "author": "A Mokhtari", "year": "2016", "unstructured": "Mokhtari A and Ribeiro A, DSA: Decentralized double stochastic averaging gradient algorithm, Journal of Machine Learning Research, 2016, 17(61): 1–35.", "journal-title": "Journal of Machine Learning Research"},
      {"issue": "2", "key": "4426_CR43", "doi-asserted-by": "publisher", "first-page": "944", "DOI": "10.1137/14096668X", "volume": "25", "author": "W Shi", "year": "2015", "unstructured": "Shi W, Ling Q, Wu G, et al., EXTRA: An exact first-order algorithm for decentralized consensus optimization, SIAM Journal on Optimization, 2015, 25(2): 944–966.", "journal-title": "SIAM Journal on Optimization"},
      {"issue": "2", "key": "4426_CR44", "doi-asserted-by": "publisher", "first-page": "351", "DOI": "10.1109/TSP.2018.2872003", "volume": "67", "author": "K Yuan", "year": "2019", "unstructured": "Yuan K, Ying B, Liu J, et al., Variance-reduced stochastic learning by networked agents under random reshuffling, IEEE Transactions on Signal Processing, 2019, 67(2): 351–366.", "journal-title": "IEEE Transactions on Signal Processing"},
      {"issue": "3", "key": "4426_CR45", "doi-asserted-by": "publisher", "first-page": "102", "DOI": "10.1109/MSP.2020.2974267", "volume": "37", "author": "R Xin", "year": "2020", "unstructured": "Xin R, Kar S, and Khan U, Decentralized stochastic optimization and machine learning: A unified variance-reduction framework for robust performance and fast convergence, IEEE Signal Processing Magazine, 2020, 37(3): 102–113.", "journal-title": "IEEE Signal Processing Magazine"},
      {"issue": "3", "key": "4426_CR46", "doi-asserted-by": "publisher", "first-page": "1245", "DOI": "10.1109/TCNS.2017.2698261", "volume": "5", "author": "G Qu", "year": "2017", "unstructured": "Qu G and Li N, Harnessing smoothness to accelerate distributed optimization, IEEE Transactions on Control of Network Systems, 2017, 5(3): 1245–1260.", "journal-title": "IEEE Transactions on Control of Network Systems"},
      {"issue": "1", "key": "4426_CR47", "doi-asserted-by": "publisher", "first-page": "409", "DOI": "10.1007/s10107-020-01487-0", "volume": "187", "author": "S Pu", "year": "2021", "unstructured": "Pu S and Nedić A, Distributed stochastic gradient tracking methods, Mathematical Programming, 2021, 187(1): 409–457.", "journal-title": "Mathematical Programming"},
      {"issue": "5", "key": "4426_CR48", "doi-asserted-by": "publisher", "first-page": "1927", "DOI": "10.1007/s11424-021-1231-9", "volume": "34", "author": "X Ma", "year": "2021", "unstructured": "Ma X, Yi P, and Chen J, Distributed gradient tracking methods with finite data rates, Journal of Systems Science & Complexity, 2021, 34(5): 1927–1952.", "journal-title": "Journal of Systems Science & Complexity"},
      {"key": "4426_CR49", "doi-asserted-by": "publisher", "first-page": "6255", "DOI": "10.1109/TSP.2020.3031071", "volume": "68", "author": "R Xin", "year": "2020", "unstructured": "Xin R, Khan U, and Kar S, Variance-reduced decentralized stochastic optimization with accelerated convergence, IEEE Transactions on Signal Processing, 2020, 68: 6255–6271.", "journal-title": "IEEE Transactions on Signal Processing"},
      {"key": "4426_CR50", "unstructured": "Zhang J and You K, Decentralized stochastic gradient tracking for non-convex empirical risk minimization, 2019, arXiv: 1909.02712."},
      {"key": "4426_CR51", "doi-asserted-by": "publisher", "first-page": "1842", "DOI": "10.1109/TSP.2021.3062553", "volume": "69", "author": "R Xin", "year": "2021", "unstructured": "Xin R, Khan U, and Kar S, An improved convergence analysis for decentralized online stochastic non-convex optimization, IEEE Transactions on Signal Processing, 2021, 69: 1842–1858.", "journal-title": "IEEE Transactions on Signal Processing"}
    ],
    "container-title": ["Journal of Systems Science and Complexity"],
    "original-title": [],
    "language": "en",
    "link": [
      {"URL": "https://link.springer.com/content/pdf/10.1007/s11424-025-4426-7.pdf", "content-type": "application/pdf", "content-version": "vor", "intended-application": "text-mining"},
      {"URL": "https://link.springer.com/article/10.1007/s11424-025-4426-7/fulltext.html", "content-type": "text/html", "content-version": "vor", "intended-application": "text-mining"},
      {"URL": "https://link.springer.com/content/pdf/10.1007/s11424-025-4426-7.pdf", "content-type": "application/pdf", "content-version": "vor", "intended-application": "similarity-checking"}
    ],
    "deposited": {"date-parts": [[2025, 9, 8]], "date-time": "2025-09-08T15:22:38Z", "timestamp": 1757344958000},
    "score": 1,
    "resource": {"primary": {"URL": "https://link.springer.com/10.1007/s11424-025-4426-7"}},
    "subtitle": [],
    "short-title": [],
    "issued": {"date-parts": [[2025, 8, 6]]},
    "references-count": 51,
    "journal-issue": {"issue": "5", "published-print": {"date-parts": [[2025, 10]]}},
    "alternative-id": ["4426"],
    "URL": "https://doi.org/10.1007/s11424-025-4426-7",
    "relation": {},
    "ISSN": ["1009-6124", "1559-7067"],
    "issn-type": [
      {"value": "1009-6124", "type": "print"},
      {"value": "1559-7067", "type": "electronic"}
    ],
    "subject": [],
    "published": {"date-parts": [[2025, 8, 6]]},
    "assertion": [
      {"value": "3 September 2024", "order": 1, "name": "received", "label": "Received", "group": {"name": "ArticleHistory", "label": "Article History"}},
      {"value": "24 October 2024", "order": 2, "name": "revised", "label": "Revised", "group": {"name": "ArticleHistory", "label": "Article History"}},
      {"value": "6 August 2025", "order": 3, "name": "first_online", "label": "First Online", "group": {"name": "ArticleHistory", "label": "Article History"}},
      {"value": "The authors declare no conflict of interest.", "order": 1, "name": "Ethics", "group": {"name": "EthicsHeading", "label": "Conflict of Interest"}}
    ]
  }
}
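The record above is a standard Crossref work object, so it can be re-fetched or post-processed programmatically. Below is a minimal sketch, assuming only the public Crossref REST API endpoint pattern `https://api.crossref.org/works/{DOI}` and the field names visible in the record itself; the script is illustrative and not part of the Crossref payload.

```python
# Minimal sketch: fetch the same Crossref work record by DOI and read a few fields.
# Assumes the public Crossref REST API (api.crossref.org); field names match the record above.
import json
import urllib.request

DOI = "10.1007/s11424-025-4426-7"  # DOI taken from the record above
URL = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(URL) as resp:
    envelope = json.load(resp)          # top-level: status, message-type, message

work = envelope["message"]              # the work metadata itself
print(work["title"][0])                 # article title
print(work["container-title"][0])       # journal name
print(work["volume"], work["page"])     # "38" "1853-1886"
print(len(work.get("reference", [])))   # number of deposited references (51 here)
```

For anything beyond one-off queries, Crossref asks clients to identify themselves (for example, a `mailto:` address in the `User-Agent` header) so requests are routed to its polite pool; a production client should add that.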