{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T07:16:16Z","timestamp":1765178176045,"version":"3.41.0"},"reference-count":68,"publisher":"Springer Science and Business Media LLC","issue":"19","license":[{"start":{"date-parts":[[2023,10,24]],"date-time":"2023-10-24T00:00:00Z","timestamp":1698105600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,24]],"date-time":"2023-10-24T00:00:00Z","timestamp":1698105600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Comput &amp; Applic"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s00521-023-09096-6","type":"journal-article","created":{"date-parts":[[2023,10,24]],"date-time":"2023-10-24T17:02:25Z","timestamp":1698166945000},"page":"13221-13236","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["hammer: Multi-level coordination of reinforcement learning agents via learned messaging"],"prefix":"10.1007","volume":"37","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1455-0412","authenticated-orcid":false,"given":"Nikunj","family":"Gupta","sequence":"first","affiliation":[]},{"given":"G.","family":"Srinivasaraghavan","sequence":"additional","affiliation":[]},{"given":"Swarup","family":"Mohalik","sequence":"additional","affiliation":[]},{"given":"Nishant","family":"Kumar","sequence":"additional","affiliation":[]},{"given":"Matthew 
E.","family":"Taylor","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,24]]},"reference":[{"key":"9096_CR1","doi-asserted-by":"publisher","first-page":"66","DOI":"10.1016\/j.artint.2018.01.002","volume":"258","author":"SV Albrecht","year":"2018","unstructured":"Albrecht SV, Stone P (2018) Autonomous agents modelling other agents: a comprehensive survey and open problems. Artif Intell 258:66\u201395","journal-title":"Artif Intell"},{"issue":"6218","key":"9096_CR2","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1126\/science.1259433","volume":"347","author":"M Bowling","year":"2015","unstructured":"Bowling M, Burch N, Johanson M, Tammelin O (2015) Heads-up limit hold\u2019em poker is solved. Science 347(6218):145\u2013149","journal-title":"Science"},{"issue":"2","key":"9096_CR3","doi-asserted-by":"publisher","first-page":"156","DOI":"10.1109\/TSMCC.2007.913919","volume":"38","author":"L Busoniu","year":"2008","unstructured":"Busoniu L, Babuska R, De Schutter B (2008) A comprehensive survey of multiagent reinforcement learning. IEEE Trans Syst Man Cybern Part C (Applications and Reviews) 38(2):156\u2013172","journal-title":"IEEE Trans Syst Man Cybern Part C (Applications and Reviews)"},{"issue":"1","key":"9096_CR4","doi-asserted-by":"publisher","first-page":"427","DOI":"10.1109\/TII.2012.2219061","volume":"9","author":"Y Cao","year":"2012","unstructured":"Cao Y, Yu W, Ren W, Chen G (2012) An overview of recent progress in the study of distributed multi-agent coordination. IEEE Transa Ind Inform 9(1):427\u2013438","journal-title":"IEEE Transa Ind Inform"},{"key":"9096_CR5","unstructured":"Castellini J, Oliehoek FA, Savani R, Whiteson S (2019) The representational capacity of action-value networks for multi-agent reinforcement learning. 
arXiv preprint arXiv:1902.07497"},{"key":"9096_CR6","unstructured":"Courbariaux M, Hubara I, Soudry D, El-Yaniv R, Bengio Y (2016) Binarized neural networks: Training deep neural networks with weights and activations constrained to+ 1 or-1. arXiv preprint arXiv:1602.02830"},{"key":"9096_CR7","doi-asserted-by":"publisher","first-page":"227","DOI":"10.1613\/jair.639","volume":"13","author":"TG Dietterich","year":"2000","unstructured":"Dietterich TG (2000) Hierarchical reinforcement learning with the maxq value function decomposition. J Artif Intell Res 13:227\u2013303","journal-title":"J Artif Intell Res"},{"key":"9096_CR8","unstructured":"Enright JJ, Wurman PR (2011) Optimization and coordinated autonomy in mobile fulfillment systems. In: Workshops at the twenty-fifth AAAI conference on artificial intelligence, Citeseer"},{"key":"9096_CR9","unstructured":"Farinelli A, Rogers A, Petcu A, Jennings NR (2008) Decentralised coordination of low-power embedded devices using the max-sum algorithm"},{"key":"9096_CR10","unstructured":"Foerster J, Assael IA, De\u00a0Freitas N, Whiteson S (2016a) Learning to communicate with deep multi-agent reinforcement learning. In: Advances in neural information processing systems, pp 2137\u20132145"},{"key":"9096_CR11","unstructured":"Foerster J, Assael IA, de\u00a0Freitas N, Whiteson S (2016b) Learning to communicate with deep multi-agent reinforcement learning. In: Lee DD, Sugiyama M, Luxburg UV, Guyon I, Garnett R (eds) Advances in Neural Information Processing Systems 29, Curran Associates, Inc., pp 2137\u20132145"},{"key":"9096_CR12","doi-asserted-by":"crossref","unstructured":"Foerster J, Farquhar G, Afouras T, Nardelli N, Whiteson S (2017a) Counterfactual multi-agent policy gradients. 
arXiv preprint arXiv:1705.08926","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"9096_CR13","unstructured":"Foerster J, Nardelli N, Farquhar G, Afouras T, Torr PH, Kohli P, Whiteson S (2017b) Stabilising experience replay for deep multi-agent reinforcement learning. arXiv preprint arXiv:1702.08887"},{"issue":"3","key":"9096_CR14","doi-asserted-by":"publisher","first-page":"325","DOI":"10.1023\/A:1008937911390","volume":"8","author":"D Fox","year":"2000","unstructured":"Fox D, Burgard W, Kruppa H, Thrun S (2000) A probabilistic approach to collaborative multi-robot localization. Auton Robots 8(3):325\u2013344","journal-title":"Auton Robots"},{"key":"9096_CR15","doi-asserted-by":"crossref","unstructured":"Gupta JK, Egorov M, Kochenderfer M (2017) Cooperative multi-agent control using deep reinforcement learning. In: International Conference on Autonomous Agents and Multiagent Systems, Springer, pp 66\u201383","DOI":"10.1007\/978-3-319-71682-4_5"},{"issue":"1","key":"9096_CR16","doi-asserted-by":"publisher","first-page":"74","DOI":"10.1111\/j.1756-8765.2010.01109.x","volume":"3","author":"G Hinton","year":"2011","unstructured":"Hinton G, Salakhutdinov R (2011) Discovering binary codes for documents by learning deep generative models. Topics Cogn Sci 3(1):74\u201391","journal-title":"Topics Cogn Sci"},{"key":"9096_CR17","doi-asserted-by":"crossref","unstructured":"Ito T, Zhang M, Robu V, Fatima S, Matsuo T, Yamaki H (2010) Innovations in agent-based complex automated negotiations, vol 319. Springer","DOI":"10.1007\/978-3-642-15612-0"},{"key":"9096_CR18","first-page":"258","volume":"11","author":"M Johanson","year":"2011","unstructured":"Johanson M, Waugh K, Bowling M, Zinkevich M (2011) Accelerating best response calculation in large extensive games. 
IJCAI 11:258\u2013265","journal-title":"IJCAI"},{"key":"9096_CR19","doi-asserted-by":"publisher","first-page":"6079","DOI":"10.1609\/aaai.v33i01.33016079","volume":"33","author":"W Kim","year":"2019","unstructured":"Kim W, Cho M, Sung Y (2019) Message-dropout: An efficient training method for multi-agent deep reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence 33:6079\u20136086","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"9096_CR20","unstructured":"Kumar S, Shah P, Hakkani-Tur D, Heck L (2017) Federated control with hierarchical multi-agent deep reinforcement learning. arXiv preprint arXiv:1712.08266"},{"key":"9096_CR21","unstructured":"Lanctot M, Zambaldi V, Gruslys A, Lazaridou A, Tuyls K, P\u00e9rolat J, Silver D, Graepel T (2017) A unified game-theoretic approach to multiagent reinforcement learning. In: Advances in neural information processing systems, pp 4190\u20134203"},{"issue":"1","key":"9096_CR22","first-page":"55","volume":"15","author":"GJ Laurent","year":"2011","unstructured":"Laurent GJ, Matignon L, Fort-Piat L et al (2011) The world of independent learners is not Markovian. Int J Knowl Based Intell Eng Syst 15(1):55\u201364","journal-title":"Int J Knowl Based Intell Eng Syst"},{"key":"9096_CR23","unstructured":"Lazaridou A, Peysakhovich A, Baroni M (2016) Multi-agent cooperation and the emergence of (natural) language. arXiv preprint arXiv:1612.07182"},{"key":"9096_CR24","unstructured":"Leibo JZ, Zambaldi V, Lanctot M, Marecki J, Graepel T (2017) Multi-agent reinforcement learning in sequential social dilemmas. arXiv preprint arXiv:1702.03037"},{"key":"9096_CR25","unstructured":"Leibo JZ, Perolat J, Hughes E, Wheelwright S, Marblestone AH, Du\u00e9\u00f1ez-Guzm\u00e1n E, Sunehag P, Dunning I, Graepel T (2018) Malthusian reinforcement learning. 
arXiv preprint arXiv:1812.07019"},{"key":"9096_CR26","unstructured":"Leibo JZ, Hughes E, Lanctot M, Graepel T (2019) Autocurricula and the emergence of innovation from social interaction: A manifesto for multi-agent intelligence research. arXiv preprint arXiv:1903.00742"},{"key":"9096_CR27","unstructured":"Lillicrap TP, Hunt JJ, Pritzel A, Heess N, Erez T, Tassa Y, Silver D, Wierstra D (2015) Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971"},{"key":"9096_CR28","unstructured":"Liu M, Deng J, Xu M, Zhang X, Wang W (2017) Cooperative deep reinforcement learning for traffic signal control. In: The 7th International Workshop on Urban Computing (UrbComp 2018)"},{"key":"9096_CR29","unstructured":"Lowe R, Wu Y, Tamar A, Harb J, Abbeel P, Mordatch I (2017) Multi-agent actor-critic for mixed cooperative-competitive environments. Neural Information Processing Systems (NIPS)"},{"key":"9096_CR30","unstructured":"Ma J, Wu F (2020) Feudal multi-agent deep reinforcement learning for traffic signal control. In: Proceedings of the 19th International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp 816\u2013824"},{"key":"9096_CR31","first-page":"p2017","volume":"2012","author":"L Matignon","year":"2012","unstructured":"Matignon L, Jeanpierre L, Mouaddib AI (2012) Coordinated multi-robot exploration under communication constraints using decentralized markov decision processes. AAAI 2012:p2017-2023","journal-title":"AAAI"},{"key":"9096_CR32","doi-asserted-by":"crossref","unstructured":"Matignon L, Laurent GJ, Le\u00a0Fort-Piat N (2012b) Independent reinforcement learners in cooperative markov games: a survey regarding coordination problems","DOI":"10.1017\/S0269888912000057"},{"key":"9096_CR33","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Graves A, Antonoglou I, Wierstra D, Riedmiller M (2013) Playing atari with deep reinforcement learning. 
arXiv preprint arXiv:1312.5602"},{"issue":"7540","key":"9096_CR34","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Rusu AA, Veness J, Bellemare MG, Graves A, Riedmiller M, Fidjeland AK, Ostrovski G et al (2015) Human-level control through deep reinforcement learning. Nature 518(7540):529\u2013533","journal-title":"Nature"},{"key":"9096_CR35","doi-asserted-by":"crossref","unstructured":"Mordatch I, Abbeel P (2018) Emergence of grounded compositional language in multi-agent populations. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol\u00a032","DOI":"10.1609\/aaai.v32i1.11492"},{"key":"9096_CR36","first-page":"6128","volume":"33","author":"S Omidshafiei","year":"2019","unstructured":"Omidshafiei S, Kim DK, Liu M, Tesauro G, Riemer M, Amato C, Campbell M, How JP (2019) Learning to teach in cooperative multiagent reinforcement learning. Proc AAAI Conf Artif Intell 33:6128\u20136136","journal-title":"Proc AAAI Conf Artif Intell"},{"issue":"3","key":"9096_CR37","doi-asserted-by":"publisher","first-page":"387","DOI":"10.1007\/s10458-005-2631-2","volume":"11","author":"L Panait","year":"2005","unstructured":"Panait L, Luke S (2005) Cooperative multi-agent learning: the state of the art. Auton Agents Multi-agent Syst 11(3):387\u2013434","journal-title":"Auton Agents Multi-agent Syst"},{"issue":"7","key":"9096_CR38","doi-asserted-by":"publisher","first-page":"877","DOI":"10.1002\/rob.21601","volume":"33","author":"J Parker","year":"2016","unstructured":"Parker J, Nunes E, Godoy J, Gini M (2016) Exploiting spatial locality and heterogeneity of agents for search and rescue teamwork. 
J Field Robot 33(7):877\u2013900","journal-title":"J Field Robot"},{"key":"9096_CR39","unstructured":"Peng P, Wen Y, Yang Y, Yuan Q, Tang Z, Long H, Wang J (2017) Multiagent bidirectionally-coordinated nets: Emergence of human-level coordination in learning to play starcraft combat games. arXiv preprint arXiv:1703.10069"},{"key":"9096_CR40","doi-asserted-by":"crossref","unstructured":"Pipattanasomporn M, Feroze H, Rahman S (2009) Multi-agent systems in a distributed smart grid: Design and implementation. In: 2009 IEEE\/PES Power Systems Conference and Exposition, IEEE, pp 1\u20138","DOI":"10.1109\/PSCE.2009.4840087"},{"key":"9096_CR41","unstructured":"Van\u00a0der Pol E, Oliehoek FA (2016) Coordinated deep reinforcement learners for traffic light control. Proceedings of learning, inference and control of multi-agent systems (at NIPS 2016) 1"},{"key":"9096_CR42","unstructured":"Rashid T, Samvelyan M, De\u00a0Witt CS, Farquhar G, Foerster J, Whiteson S (2018) Qmix: Monotonic value function factorisation for deep multi-agent reinforcement learning. arXiv preprint arXiv:1803.11485"},{"issue":"2","key":"9096_CR43","doi-asserted-by":"publisher","first-page":"730","DOI":"10.1016\/j.artint.2010.11.001","volume":"175","author":"A Rogers","year":"2011","unstructured":"Rogers A, Farinelli A, Stranders R, Jennings NR (2011) Bounded approximate decentralised coordination via the max-sum algorithm. Artif Intell 175(2):730\u2013759","journal-title":"Artif Intell"},{"issue":"2","key":"9096_CR44","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1109\/TEVC.2012.2208755","volume":"17","author":"S Samothrakis","year":"2012","unstructured":"Samothrakis S, Lucas S, Runarsson T, Robles D (2012) Coevolving game-playing agents: measuring performance and intransitivities. 
IEEE Trans Evolut Comput 17(2):213\u2013226","journal-title":"IEEE Trans Evolut Comput"},{"key":"9096_CR45","unstructured":"Schulman J, Levine S, Abbeel P, Jordan M, Moritz P (2015) Trust region policy optimization. In: International conference on machine learning, pp 1889\u20131897"},{"key":"9096_CR46","unstructured":"Schulman J, Wolski F, Dhariwal P, Radford A, Klimov O (2017) Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347"},{"key":"9096_CR47","unstructured":"Seuken S, Zilberstein S (2012) Improved memory-bounded dynamic programming for decentralized pomdps. arXiv preprint arXiv:1206.5295"},{"key":"9096_CR48","unstructured":"Sheng J, Wang X, Jin B, Yan J, Li W, Chang TH, Wang J, Zha H (2020) Learning structured communication for multi-agent reinforcement learning. arXiv preprint arXiv:2002.04235"},{"issue":"7","key":"9096_CR49","doi-asserted-by":"publisher","first-page":"365","DOI":"10.1016\/j.artint.2006.02.006","volume":"171","author":"Y Shoham","year":"2007","unstructured":"Shoham Y, Powers R, Grenager T (2007) If multi-agent learning is the answer, what is the question? Artif Intell 171(7):365\u2013377","journal-title":"Artif Intell"},{"key":"9096_CR50","unstructured":"Silva FLD, Warnell G, Costa AHR, Stone P (2020) Agents teaching agents: a survey on inter-agent transfer learning. Autonomous Agents and Multi-Agent Systems"},{"issue":"7587","key":"9096_CR51","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1038\/nature16961","volume":"529","author":"D Silver","year":"2016","unstructured":"Silver D, Huang A, Maddison CJ, Guez A, Sifre L, Van Den Driessche G, Schrittwieser J, Antonoglou I, Panneershelvam V, Lanctot M et al (2016) Mastering the game of go with deep neural networks and tree search. 
Nature 529(7587):484\u2013489","journal-title":"Nature"},{"issue":"1","key":"9096_CR52","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava N, Hinton G, Krizhevsky A, Sutskever I, Salakhutdinov R (2014) Dropout: a simple way to prevent neural networks from overfitting. J Mach Learn Res 15(1):1929\u20131958","journal-title":"J Mach Learn Res"},{"key":"9096_CR53","unstructured":"Sukhbaatar S, Fergus R, et\u00a0al. (2016) Learning multiagent communication with backpropagation. In: Advances in neural information processing systems, pp 2244\u20132252"},{"key":"9096_CR54","unstructured":"Sunehag P, Lever G, Gruslys A, Czarnecki WM, Zambaldi V, Jaderberg M, Lanctot M, Sonnerat N, Leibo JZ, Tuyls K, et\u00a0al. (2017) Value-decomposition networks for cooperative multi-agent learning. arXiv preprint arXiv:1706.05296"},{"key":"9096_CR55","unstructured":"Sunehag P, Lever G, Gruslys A, Czarnecki WM, Zambaldi VF, Jaderberg M, Lanctot M, Sonnerat N, Leibo JZ, Tuyls K, et\u00a0al. (2018) Value-decomposition networks for cooperative multi-agent learning based on team reward. In: AAMAS, pp 2085\u20132087"},{"key":"9096_CR56","unstructured":"Szegedy C, Zaremba W, Sutskever I, Bruna J, Erhan D, Goodfellow I, Fergus R (2013) Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199"},{"issue":"4","key":"9096_CR57","doi-asserted-by":"publisher","first-page":"e0172395","DOI":"10.1371\/journal.pone.0172395","volume":"12","author":"A Tampuu","year":"2017","unstructured":"Tampuu A, Matiisen T, Kodelja D, Kuzovkin I, Korjus K, Aru J, Aru J, Vicente R (2017) Multiagent cooperation and competition with deep reinforcement learning. PloS one 12(4):e0172395","journal-title":"PloS one"},{"key":"9096_CR58","doi-asserted-by":"crossref","unstructured":"Tan M (1993) Multi-agent reinforcement learning: Independent vs. cooperative agents. 
In: Proceedings of the tenth international conference on machine learning, pp 330\u2013337","DOI":"10.1016\/B978-1-55860-307-3.50049-6"},{"key":"9096_CR59","unstructured":"Tang H, Hao J, Lv T, Chen Y, Zhang Z, Jia H, Ren C, Zheng Y, Fan C, Wang L (2018) Hierarchical deep multiagent reinforcement learning. arXiv preprint arXiv:1809.09332"},{"key":"9096_CR60","doi-asserted-by":"publisher","unstructured":"Taylor ME, Carboni N, Fachantidis A, Vlahavas I, Torrey L, (2014) Reinforcement learning agents providing advice in complex video games. Connection Science 26(1):45\u201363. https:\/\/doi.org\/10.1080\/09540091.2014.885279","DOI":"10.1080\/09540091.2014.885279"},{"issue":"3","key":"9096_CR61","doi-asserted-by":"publisher","first-page":"58","DOI":"10.1145\/203330.203343","volume":"38","author":"G Tesauro","year":"1995","unstructured":"Tesauro G (1995) Temporal difference learning and td-gammon. Commun ACM 38(3):58\u201368","journal-title":"Commun ACM"},{"issue":"3","key":"9096_CR62","doi-asserted-by":"publisher","first-page":"41","DOI":"10.1609\/aimag.v33i3.2426","volume":"33","author":"K Tuyls","year":"2012","unstructured":"Tuyls K, Weiss G (2012) Multiagent learning: basics, challenges, and prospects. Ai Magaz 33(3):41\u201341","journal-title":"Ai Magaz"},{"key":"9096_CR63","unstructured":"Vezhnevets AS, Osindero S, Schaul T, Heess N, Jaderberg M, Silver D, Kavukcuoglu K (2017) Feudal networks for hierarchical reinforcement learning. arXiv preprint arXiv:1703.01161"},{"key":"9096_CR64","doi-asserted-by":"crossref","unstructured":"Whiteson S, Tanner B, Taylor ME, Stone P (2011) Protecting against evaluation overfitting in empirical reinforcement learning. 
In: 2011 IEEE symposium on adaptive dynamic programming and reinforcement learning (ADPRL), IEEE, pp 120\u2013127","DOI":"10.1109\/ADPRL.2011.5967363"},{"key":"9096_CR65","first-page":"73","volume":"19","author":"M Wunder","year":"2009","unstructured":"Wunder M, Littman M, Stone M (2009) Communication, credibility and negotiation using a cognitive hierarchy model. AAMAS Workshop, Citeseer 19:73\u201380","journal-title":"AAMAS Workshop, Citeseer"},{"issue":"2","key":"9096_CR66","doi-asserted-by":"publisher","first-page":"431","DOI":"10.1016\/j.eswa.2005.04.039","volume":"29","author":"W Ying","year":"2005","unstructured":"Ying W, Dayong S (2005) Multi-agent framework for third party logistics in e-commerce. Expert Systems with Applications 29(2):431\u2013436","journal-title":"Expert Systems with Applications"},{"key":"9096_CR67","unstructured":"Za\u00efem MS, Bennequin E (2019) Learning to communicate in multi-agent reinforcement learning: A review. arXiv preprint arXiv:1911.05438"},{"key":"9096_CR68","unstructured":"Zhou M, Luo J, Villela J, Yang Y, Rusu D, Miao J, Zhang W, Alban M, Fadakar I, Chen Z, et\u00a0al. (2020) Smarts: Scalable multi-agent reinforcement learning training school for autonomous driving. 
arXiv preprint arXiv:2010.09776"}],"container-title":["Neural Computing and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-023-09096-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00521-023-09096-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-023-09096-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,27]],"date-time":"2025-06-27T08:29:05Z","timestamp":1751012945000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00521-023-09096-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,24]]},"references-count":68,"journal-issue":{"issue":"19","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["9096"],"URL":"https:\/\/doi.org\/10.1007\/s00521-023-09096-6","relation":{},"ISSN":["0941-0643","1433-3058"],"issn-type":[{"type":"print","value":"0941-0643"},{"type":"electronic","value":"1433-3058"}],"subject":[],"published":{"date-parts":[[2023,10,24]]},"assertion":[{"value":"15 November 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 October 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
interest"}},{"value":"Data sharing not applicable to this article as no datasets were generated or analyzed during the current study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Data availability"}}]}}