{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,16]],"date-time":"2025-07-16T12:51:57Z","timestamp":1752670317996},"reference-count":53,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2022,7,29]],"date-time":"2022-07-29T00:00:00Z","timestamp":1659052800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,7,29]],"date-time":"2022-07-29T00:00:00Z","timestamp":1659052800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61903022"],"award-info":[{"award-number":["61903022"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012237","name":"Beijing Advanced Innovation Center for Intelligent Robots and Systems, Beijing Institute of Technology","doi-asserted-by":"publisher","award":["2019IRS11"],"award-info":[{"award-number":["2019IRS11"]}],"id":[{"id":"10.13039\/501100012237","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,4]]},"DOI":"10.1007\/s10489-022-03821-9","type":"journal-article","created":{"date-parts":[[2022,7,29]],"date-time":"2022-07-29T08:08:03Z","timestamp":1659082083000},"page":"7876-7891","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Switching-aware multi-agent deep reinforcement learning for target interception"],"prefix":"10.1007","volume":"53","author":[{"given":"Dongyu","family":"Fan","sequence":"first","affiliation":[]},{"given":"Haikuo","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Lijing","family":"Dong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,7,29]]},"reference":[{"issue":"9","key":"3821_CR1","doi-asserted-by":"publisher","first-page":"3826","DOI":"10.1109\/TCYB.2020.2977374","volume":"50","author":"TT Nguyen","year":"2020","unstructured":"Nguyen TT, Nguyen ND, Nahavandi S (2020) Deep reinforcement learning for multiagent systems: A review of challenges, solutions, and applications. IEEE Trans Cybern 50(9):3826\u20133839","journal-title":"IEEE Trans Cybern"},{"key":"3821_CR2","doi-asserted-by":"publisher","DOI":"10.1201\/9780429289613","volume-title":"Multiagent systems: Introduction and coordination control","author":"MS Mahmoud","year":"2020","unstructured":"Mahmoud M S (2020) Multiagent systems: Introduction and coordination control. CRC Press, Boca Raton, FL, USA"},{"issue":"4","key":"3821_CR3","doi-asserted-by":"publisher","first-page":"7461","DOI":"10.1109\/LRA.2021.3097660","volume":"6","author":"G Ji","year":"2021","unstructured":"Ji G, Yan J, Du J, Yan W, Chen J, Lu Y, Rojas J, Cheng SS (2021) Towards safe control of continuum manipulator using shielded multiagent reinforcement learning. IEEE Robot Autom Lett 6(4):7461\u20137468","journal-title":"IEEE Robot Autom Lett"},{"key":"3821_CR4","doi-asserted-by":"publisher","first-page":"231","DOI":"10.1007\/s13042-020-01167-7","volume":"12","author":"A Perrusqu\u2019ia","year":"2021","unstructured":"Perrusqu\u2019ia A, Yu W, Li X (2021) Multi-agent reinforcement learning for redundant robot control in task-space. Int J Mach Learn Cybern 12:231\u2013241","journal-title":"Int J Mach Learn Cybern"},{"issue":"2","key":"3821_CR5","doi-asserted-by":"publisher","first-page":"598","DOI":"10.1007\/s12555-014-0349-0","volume":"14","author":"H Kim","year":"2016","unstructured":"Kim H, Kim D, Kim H, Shin JU, Myung H (2016) An extended any-angle path planning algorithm for maintaining formation of multi-agent jellyfish elimination robot system. Int J Control Autom Syst 14(2):598\u2013607","journal-title":"Int J Control Autom Syst"},{"key":"3821_CR6","doi-asserted-by":"publisher","first-page":"285","DOI":"10.1016\/j.neucom.2021.09.044","volume":"466","author":"W Zhou","year":"2021","unstructured":"Zhou W, Liu Z, Li J, Xu X, Shen L (2021) Multi-target tracking for unmanned aerial vehicle swarms using deep reinforcement learning. Neurocomputing 466:285\u2013297","journal-title":"Neurocomputing"},{"issue":"2","key":"3821_CR7","doi-asserted-by":"publisher","first-page":"2365","DOI":"10.1109\/LRA.2020.2972889","volume":"5","author":"J Kim","year":"2020","unstructured":"Kim J (2020) Cooperative localization and unknown currents estimation using multiple autonomous underwater vehicles. IEEE Robot Autom Lett 5(2):2365\u20132371","journal-title":"IEEE Robot Autom Lett"},{"issue":"11","key":"3821_CR8","doi-asserted-by":"publisher","first-page":"13702","DOI":"10.1109\/TVT.2020.3023733","volume":"69","author":"Y-J Chen","year":"2020","unstructured":"Chen Y-J, Chang D-K, Zhang C (2020) Autonomous tracking using a swarm of uavs: A constrained multi-agent reinforcement learning approach. IEEE Trans Veh Technol 69(11):13702\u201313717","journal-title":"IEEE Trans Veh Technol"},{"issue":"5","key":"3821_CR9","doi-asserted-by":"publisher","first-page":"3307","DOI":"10.1109\/TAES.2021.3074201","volume":"57","author":"Y Shi","year":"2021","unstructured":"Shi Y, Hu Q (2021) Observer-based spacecraft formation coordinated control via a unified event-triggered communication. IEEE Trans Aerosp Electron Syst 57(5):3307\u20133319","journal-title":"IEEE Trans Aerosp Electron Syst"},{"key":"3821_CR10","doi-asserted-by":"publisher","first-page":"109552","DOI":"10.1016\/j.automatica.2021.109552","volume":"128","author":"Y Dong","year":"2021","unstructured":"Dong Y, Chen J (2021) Nonlinear observer-based approach for cooperative control of networked rigid spacecraft systems. Automatica 128:109552","journal-title":"Automatica"},{"key":"3821_CR11","doi-asserted-by":"publisher","first-page":"197","DOI":"10.1016\/j.ast.2017.12.004","volume":"73","author":"C Zhang","year":"2018","unstructured":"Zhang C, Wang J, Zhang D, Shao X (2018) Fault-tolerant adaptive finite-time attitude synchronization and tracking control for multi-spacecraft formation. Aerosp Sci Technol 73:197\u2013209","journal-title":"Aerosp Sci Technol"},{"issue":"9","key":"3821_CR12","doi-asserted-by":"publisher","first-page":"3220","DOI":"10.1109\/TSMC.2018.2833098","volume":"50","author":"P Duan","year":"2020","unstructured":"Duan P, Liu K, Huang N, Duan Z (2020) Event-based distributed tracking control for second-order multiagent systems with switching networks. IEEE Trans Syst Man Cybern Syst 50(9):3220\u20133230","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"issue":"3","key":"3821_CR13","doi-asserted-by":"publisher","first-page":"519","DOI":"10.1080\/00207179.2018.1479539","volume":"93","author":"L Dong","year":"2020","unstructured":"Dong L, Yu D, Yan H (2020) Stability analysis of nonlinear multi-agent relay tracking systems over a finite time interval. Int J Control 93(3):519\u2013527","journal-title":"Int J Control"},{"issue":"5","key":"3821_CR14","doi-asserted-by":"publisher","first-page":"1820","DOI":"10.1109\/TCYB.2019.2908874","volume":"50","author":"Y-W Wang","year":"2020","unstructured":"Wang Y-W, Lei Y, Bian T, Guan Z-H (2020) Distributed control of nonlinear multiagent systems with unknown and nonidentical control directions via event-triggered communication. IEEE Trans Cybern 50(5):1820\u20131832","journal-title":"IEEE Trans Cybern"},{"issue":"4","key":"3821_CR15","doi-asserted-by":"publisher","first-page":"1646","DOI":"10.1109\/TCSI.2021.3049347","volume":"68","author":"C Liu","year":"2021","unstructured":"Liu C, Jiang B, Zhang K, Patton RJ (2021) Distributed fault-tolerant consensus tracking control of multi-agent systems under fixed and switching topologies. IEEE Transactions on Circuits and Systems I: Regular Papers 68(4):1646\u20131658","journal-title":"IEEE Transactions on Circuits and Systems I: Regular Papers"},{"issue":"5","key":"3821_CR16","doi-asserted-by":"publisher","first-page":"1757","DOI":"10.1109\/TNNLS.2019.2920880","volume":"31","author":"W Zou","year":"2020","unstructured":"Zou W, Shi P, Xiang Z, Shi Y (2020) Finite-time consensus of second-order switched nonlinear multi-agent systems. IEEE Trans Neural Netw Learn Syst 31(5):1757\u20131762","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"3821_CR17","doi-asserted-by":"publisher","first-page":"108804","DOI":"10.1016\/j.automatica.2020.108804","volume":"113","author":"J Jiang","year":"2020","unstructured":"Jiang J, Jiang Y (2020) Leader-following consensus of linear time-varying multi-agent systems under fixed and switching topologies. Automatica 113:108804","journal-title":"Automatica"},{"key":"3821_CR18","volume-title":"Reinforcement learning: An introduction","author":"RS Sutton","year":"2018","unstructured":"Sutton RS, Barto AG (2018) Reinforcement learning: An introduction. MIT Press, Cambridge, MA, USA"},{"issue":"6","key":"3821_CR19","doi-asserted-by":"publisher","first-page":"750","DOI":"10.1007\/s10458-019-09421-1","volume":"33","author":"P Hernandez-Leal","year":"2019","unstructured":"Hernandez-Leal P, Kartal B, Taylor M E (2019) A survey and critique of multiagent deep reinforcement learning. Auton Agent Multi-Agent Syst 33(6):750\u2013797","journal-title":"Auton Agent Multi-Agent Syst"},{"key":"3821_CR20","doi-asserted-by":"crossref","unstructured":"Zhang K, Yang Z, Ba\u015far T (2021) Multi-agent reinforcement learning: A selective overview of theories and algorithms. pp 321\u2013384","DOI":"10.1007\/978-3-030-60990-0_12"},{"key":"3821_CR21","doi-asserted-by":"crossref","unstructured":"Gronauer S, Diepold K (2021) Multi-agent deep reinforcement learning: a survey. Artif Intell Rev, pp 1\u201349","DOI":"10.1007\/s10462-021-09996-w"},{"key":"3821_CR22","doi-asserted-by":"crossref","unstructured":"Gupta S, Singal G, Garg D (2021) Deep reinforcement learning techniques in diversified domains: A survey. Archives of Computational Methods in Engineering, pp 4715\u20134754","DOI":"10.1007\/s11831-021-09552-3"},{"key":"3821_CR23","doi-asserted-by":"publisher","first-page":"708","DOI":"10.1016\/j.ins.2021.04.088","volume":"570","author":"M Shang","year":"2021","unstructured":"Shang M, Zhou Y, Fujita H (2021) Deep reinforcement learning with reference system to handle constraints for energy-efficient train control. Inf Sci 570:708\u2013721","journal-title":"Inf Sci"},{"key":"3821_CR24","doi-asserted-by":"crossref","unstructured":"Le N, Rathour VS, Yamazaki K, Luu K, Savvides M (2021) Deep reinforcement learning in computer vision: a comprehensive survey. Artif Intell Rev","DOI":"10.1007\/s10462-021-10061-9"},{"key":"3821_CR25","doi-asserted-by":"crossref","unstructured":"Zhou SK, Le HN, Luu K, V Nguyen H, Ayache N (2021) Deep reinforcement learning in medical imaging: A literature review. Med Image Anal 73:102193","DOI":"10.1016\/j.media.2021.102193"},{"key":"3821_CR26","doi-asserted-by":"publisher","first-page":"354","DOI":"10.1038\/nature24270","volume":"550","author":"D Silver","year":"2017","unstructured":"Silver D, Schrittwieser J, Simonyan K, et al. (2017) Mastering the game of go without human knowledge. Nature 550:354\u2013 359","journal-title":"Nature"},{"key":"3821_CR27","unstructured":"Schulman J, Wolski F, Dhariwal P, Radford A, Klimov O (2017) Proximal policy optimization algorithms. arXiv:1707.06347v2"},{"key":"3821_CR28","unstructured":"Lillicrap TP, Hunt JJ, Pritzel A, Heess N, Erez T, Tassa Y, Silver D, Wierstra D (2016) Continuous control with deep reinforcement learning. In: 4th International Conference on Learning Representations (ICLR), San Juan, Puerto Rico, May 2-4, 2016"},{"key":"3821_CR29","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1038\/nature16961","volume":"529","author":"D Silver","year":"2016","unstructured":"Silver D, Huang A, Maddison C, et al. (2016) Mastering the game of go with deep neural networks and tree search. Nature 529:484\u2013489","journal-title":"Nature"},{"issue":"7540","key":"3821_CR30","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, et al. (2015) Human-level control through deep reinforcement learning. Nature 518(7540):529\u2013533","journal-title":"Nature"},{"key":"3821_CR31","volume-title":"Multiagent systems - algorithmic, game-theoretic, and logical foundations","author":"Y Shoham","year":"2009","unstructured":"Shoham Y, Leyton-Brown K (2009) Multiagent systems - algorithmic, game-theoretic, and logical foundations. Cambridge University Press, Cambridge, England"},{"key":"3821_CR32","doi-asserted-by":"crossref","unstructured":"Sadhu AK, Konar A (2020) Multi-agent coordination: A reinforcement learning approach. John Wiley & Sons","DOI":"10.1002\/9781119699057"},{"key":"3821_CR33","doi-asserted-by":"publisher","first-page":"128","DOI":"10.1016\/j.future.2021.04.018","volume":"123","author":"Y Zhang","year":"2021","unstructured":"Zhang Y, Zhou Y, Lu H, Fujita H (2021) Cooperative multi-agent actor-critic control of traffic network flow based on edge computing. Futur Gener Comput Syst 123:128\u2013141","journal-title":"Futur Gener Comput Syst"},{"key":"3821_CR34","doi-asserted-by":"crossref","unstructured":"Ye Z, Chen Y, Jiang X, Song G, Yang B, Fan S (2021) Improving sample efficiency in multi-agent actor-critic methods. Appl Intell","DOI":"10.1007\/s10489-021-02554-5"},{"issue":"5","key":"3821_CR35","doi-asserted-by":"publisher","first-page":"4137","DOI":"10.1109\/TSG.2021.3072251","volume":"12","author":"D Cao","year":"2021","unstructured":"Cao D, Zhao J, Hu W, Ding F, Huang Q, Chen Z, Blaabjerg F (2021) Data-driven multi-agent deep reinforcement learning for distribution system decentralized voltage control with high penetration of pvs. IEEE Trans on Smart Grid 12(5):4137\u20134150","journal-title":"IEEE Trans on Smart Grid"},{"issue":"10","key":"3821_CR36","doi-asserted-by":"publisher","first-page":"2947","DOI":"10.1007\/s13042-021-01385-7","volume":"12","author":"W Du","year":"2021","unstructured":"Du W, Ding S, Zhang C, Du S (2021) Modified action decoder using bayesian reasoning for multi-agent deep reinforcement learning. Int J Mach Learn Cybern 12(10):2947\u20132961","journal-title":"Int J Mach Learn Cybern"},{"issue":"2","key":"3821_CR37","doi-asserted-by":"publisher","first-page":"1658","DOI":"10.1109\/TVT.2021.3055511","volume":"70","author":"C Xu","year":"2021","unstructured":"Xu C, Liu S, Zhang C, Huang Y, Lu Z, Yang L (2021) Multi-agent reinforcement learning based distributed transmission in collaborative cloud-edge systems. IEEE Trans Veh Technol 70(2):1658\u20131672","journal-title":"IEEE Trans Veh Technol"},{"key":"3821_CR38","unstructured":"Sunehag P, Lever G, Gruslys A, et al. (2018) Value-decomposition networks for cooperative multi-agent learning based on team reward. In: Proceedings of the 17th international conference on autonomous agents and multiagent systems (AAMAS), Stockholm, Sweden, July 10-15, 2018, pp 2085\u20132087"},{"key":"3821_CR39","unstructured":"Rashid T, Samvelyan M, Schroeder C, Farquhar G, Foerster J, Whiteson S (2018) QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning. In: Proceedings of the 35th international conference on machine learning (ICML), Stockholm Sweden, 10-15 Jul, 2018, vol 80, pp 4295\u20134304"},{"key":"3821_CR40","unstructured":"Son K, Kim D, Kang W J, Hostallero D, Yi Y (2019) QTRAN: learning to factorize with transformation for cooperative multi-agent reinforcement learning. In: Proceedings of the 36th international conference on machine learning (ICML), Long Beach, California, USA, 9-15 june 2019, vol 97, pp 5887\u20135896"},{"key":"3821_CR41","doi-asserted-by":"crossref","unstructured":"Foerster J N, Farquhar G, Afouras T, Nardelli N, Whiteson S (2018) Counterfactual multi-agent policy gradients. In: Proceedings of the 32nd AAAI conference on artificial intelligence (AAAI), New Orleans, Louisiana, USA, February 2-7, 2018, pp 2974\u20132982","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"3821_CR42","unstructured":"Lowe R, Wu Y, Tamar A, Harb J, Abbeel P, Mordatch I (2017) Multi-agent actor-critic for mixed cooperative-competitive environments. In: Advances in neural information processing systems 30 (NIPS), Long Beach, CA, USA, 4-9 december 2017, pp 6379\u20136390"},{"key":"3821_CR43","doi-asserted-by":"publisher","first-page":"114896","DOI":"10.1016\/j.eswa.2021.114896","volume":"176","author":"L Huang","year":"2021","unstructured":"Huang L, Fu M, Qu H, Wang S, Hu S (2021) A deep reinforcement learning-based method applied for solving multi-agent defense and attack problems. Expert Syst Appl 176:114896","journal-title":"Expert Syst Appl"},{"issue":"13","key":"3821_CR44","doi-asserted-by":"publisher","first-page":"10843","DOI":"10.1109\/JIOT.2021.3050804","volume":"8","author":"X Chen","year":"2021","unstructured":"Chen X, Liu G (2021) Energy-efficient task offloading and resource allocation via deep reinforcement learning for augmented reality in mobile edge networks. IEEE Internet of Things Journal 8(13):10843\u201310856","journal-title":"IEEE Internet of Things Journal"},{"issue":"9","key":"3821_CR45","doi-asserted-by":"publisher","first-page":"1857","DOI":"10.1109\/LWC.2021.3084213","volume":"10","author":"Y Yang","year":"2021","unstructured":"Yang Y, Li B, Zhang S, Zhao W, Zhang H (2021) Cooperative proactive eavesdropping based on deep reinforcement learning. IEEE Wirel Commun Lett 10(9):1857\u20131861","journal-title":"IEEE Wirel Commun Lett"},{"issue":"1","key":"3821_CR46","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1109\/TCCN.2020.3027695","volume":"7","author":"L Wang","year":"2021","unstructured":"Wang L, Wang K, Pan C, Xu W, Aslam N, Hanzo L (2021) Multi-agent deep reinforcement learning-based trajectory planning for multi-uav assisted mobile edge computing. IEEE Trans Cogn Commun Netw 7(1):73\u201384","journal-title":"IEEE Trans Cogn Commun Netw"},{"issue":"1","key":"3821_CR47","doi-asserted-by":"publisher","first-page":"231","DOI":"10.1109\/TNSE.2020.3036456","volume":"8","author":"T Wu","year":"2021","unstructured":"Wu T, Zhou P, Wang B, Li A, Tang X, Xu Z, Chen K, Ding X (2021) Joint traffic control and multi-channel reassignment for core backbone network in sdn-iot: A multi-agent deep reinforcement learning approach. IEEE Trans Netw Sci Eng 8(1):231\u2013245","journal-title":"IEEE Trans Netw Sci Eng"},{"issue":"8","key":"3821_CR48","doi-asserted-by":"publisher","first-page":"2604","DOI":"10.1109\/LCOMM.2021.3078442","volume":"25","author":"A Gao","year":"2021","unstructured":"Gao A, Du C, Ng S X, Liang W (2021) A cooperative spectrum sensing with multi-agent reinforcement learning approach in cognitive radio networks. IEEE Commun Lett 25(8):2604\u2013 2608","journal-title":"IEEE Commun Lett"},{"issue":"4","key":"3821_CR49","doi-asserted-by":"publisher","first-page":"2903","DOI":"10.1109\/TSG.2021.3052998","volume":"12","author":"X Sun","year":"2021","unstructured":"Sun X, Qiu J (2021) Two-stage volt\/var control in active distribution networks with multi-agent deep reinforcement learning method. IEEE Trans on Smart Grid 12(4):2903\u20132912","journal-title":"IEEE Trans on Smart Grid"},{"key":"3821_CR50","doi-asserted-by":"publisher","first-page":"206","DOI":"10.1016\/j.neucom.2020.05.097","volume":"411","author":"F Zhang","year":"2020","unstructured":"Zhang F, Li J, Li Z (2020) A TD3-based multi-agent deep reinforcement learning method in mixed cooperation-competition environment. Neurocomputing 411:206\u2013215","journal-title":"Neurocomputing"},{"key":"3821_CR51","unstructured":"Chaudhuri K, Salakhutdinov R (2019) Actor-attention-critic for multi-agent reinforcement learning. In: Proceedings of the 36th international conference on machine learning (ICML), 9-15 June 2019, Long Beach, California, USA"},{"key":"3821_CR52","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), Las Vegas, NV, USA, june 27-30, 2016","DOI":"10.1109\/CVPR.2016.90"},{"key":"3821_CR53","unstructured":"Kingma DP, Ba J (2015) Adam: A method for stochastic optimization. In: 3rd international conference on learning representations (ICLR 2015). ICLR, San Diego, CA, USA"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03821-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-03821-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03821-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,16]],"date-time":"2023-03-16T02:39:02Z","timestamp":1678934342000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-03821-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,29]]},"references-count":53,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2023,4]]}},"alternative-id":["3821"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-03821-9","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,7,29]]},"assertion":[{"value":"25 May 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 July 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"<!--Emphasis Type='Bold' removed-->Conflict of interest"}}]}}