{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T20:49:23Z","timestamp":1773434963585,"version":"3.50.1"},"reference-count":162,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Korea Institute of Planning and Evaluation for Technology in Food, Agriculture and Forestry (IPET) through the Agriculture and Food Convergence Technologies Program for Research Manpower Development"},{"DOI":"10.13039\/501100003624","name":"Ministry of Agriculture, Food and Rural Affairs","doi-asserted-by":"crossref","award":["RS-2024-00397026, 33%"],"award-info":[{"award-number":["RS-2024-00397026, 33%"]}],"id":[{"id":"10.13039\/501100003624","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Innovative Human Resource Development for Local Intellectualization Program through the Institute of Information and Communications Technology Planning and Evaluation (IITP) Grant"},{"name":"Korea Government","award":["IITP-2024-00156287, 33%"],"award-info":[{"award-number":["IITP-2024-00156287, 33%"]}]},{"name":"IITP through the Artificial Intelligence Convergence Innovation Human Resources Development Grant"},{"name":"Korea Government","award":["IITP-2023-RS-2023-00256629, 34%"],"award-info":[{"award-number":["IITP-2023-RS-2023-00256629, 34%"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3433540","type":"journal-article","created":{"date-parts":[[2024,7,25]],"date-time":"2024-07-25T17:31:57Z","timestamp":1721928717000},"page":"103026-103048","source":"Crossref","is-referenced-by-count":20,"title":["A Review of Reinforcement Learning for Fixed-Wing Aircraft Control Tasks"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5413-6710","authenticated-orcid":false,"given":"David J.","family":"Richter","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]},{"given":"Ricardo A.","family":"Calix","sequence":"additional","affiliation":[{"name":"College of Technology, Purdue University Northwest, Hammond, IN, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9985-3051","authenticated-orcid":false,"given":"Kyungbaek","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10827"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3426020.3426072"},{"key":"ref10","article-title":"A deep reinforcement learning approach to the ancient Indian game\u2014Chowka Bhara","author":"Patil","year":"2021","journal-title":"TechRxiv"},{"key":"ref11","first-page":"91","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Rudin"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3301273"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICUAS.2019.8798254"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICUS50048.2020.9274875"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.trip.2021.100425"},{"key":"ref16","first-page":"1","article-title":"Autonomous helicopter flight via reinforcement learning","volume-title":"Proc. NIPS","volume":"16","author":"Kim"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2020.3023733"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-017-0696-1"},{"key":"ref19","article-title":"Autonomous control of simulated fixed wing aircraft using deep reinforcement learning","author":"Rennie","year":"2018"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2018.2890773"},{"key":"ref21","volume-title":"Scikit-Decide\u2014AI Framework for Reinforcement Learning, Automated Planning and Scheduling","year":"2020"},{"key":"ref22","volume-title":"Scikit-Decide\u2014AI Framework for Reinforcement Learning, Automated Planning and Scheduling","year":"2020"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3115711"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CDC51059.2022.9992959"},{"key":"ref25","first-page":"1","article-title":"Scalable inverse reinforcement learning via instructed feature construction","volume-title":"Proc. Workshops 26th AAAI Conf. Artif. Intell.","author":"Singliar"},{"key":"ref26","volume-title":"XPlane-ML-an environment for learning and decision systems for airplane operations","author":"Staudinger","year":"2018"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-33-6984-9_40"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1155\/2017\/3296874"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.3390\/electronics10090999"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s41315-023-00308-9"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-77939-9_2"},{"key":"ref32","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-030-77939-9","volume-title":"Deep Learning for Unmanned Systems","author":"Koubaa","year":"2021"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2022.105321"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.jtrangeo.2015.08.017"},{"key":"ref35","volume-title":"Human Error in Aviation","author":"Dismukes","year":"2009"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-12-374518-7.00001-8"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICSAI61474.2023.10423342"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1016\/j.jag.2021.102456"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.3390\/s19224837"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2023.3312221"},{"key":"ref41","volume-title":"Modern Control Engineering","author":"Katsuhiko","year":"2009"},{"key":"ref42","volume-title":"Feedback Control of Dynamic Systems","volume":"4","author":"Franklin","year":"2002"},{"key":"ref43","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-4612-5671-7","volume-title":"Introduction to Optimal Control Theory","author":"Macki","year":"1982"},{"key":"ref44","volume-title":"Modern Control Systems","author":"Dorf","year":"2011"},{"key":"ref45","volume-title":"Automatic Control Systems","volume":"8","author":"Kuo","year":"1995"},{"key":"ref46","volume-title":"PID Controllers: Theory, Design, and Tuning","author":"\u00c5str\u00f6m","year":"1995"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.18576\/amis\/100136"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1134\/S0869864316020049"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICOMITEE53461.2021.9650314"},{"key":"ref50","first-page":"285","article-title":"Application of reinforcement learning in heading control of a fixed wing UAV using X-plane platform","volume":"6","author":"Kimathi","year":"2017","journal-title":"Int. J. Sci. Technol. Res."},{"key":"ref51","article-title":"UAV heading controller using reinforcement learning","volume-title":"Proc. Pan Afr. Conf. Sci., Comput. Telecommun. (PACT)","author":"Kimathi"},{"key":"ref52","volume-title":"Optimal Control: Linear Quadratic Methods","author":"Anderson","year":"2007"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.proeng.2014.09.084"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.12928\/telkomnika.v21i2.22051"},{"issue":"1","key":"ref55","first-page":"19","article-title":"Using linear quadratic Gaussian optimal control for lateral motion of aircraft","volume":"3","author":"Maddi","year":"2009","journal-title":"Int. J. Aerosp. Mech. Eng."},{"key":"ref56","volume-title":"A Course in Robust Control Theory: A Convex Approach: 36","author":"Dullerud","year":"1999"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.2514\/6.2021-1057"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.3390\/math12071118"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICUAS.2014.6842364"},{"key":"ref60","volume-title":"Model Predictive Control System Design and Implementation Using MATLAB","author":"Wang","year":"2009"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.2514\/1.52162"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.11591\/tijee.v15i2.1538"},{"key":"ref63","doi-asserted-by":"crossref","DOI":"10.1007\/978-0-85729-664-1","volume-title":"Adaptive Control: Algorithms, Analysis and Applications","author":"Landau","year":"2011"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2012.2200104"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1002\/acs.2759"},{"key":"ref66","volume-title":"Introduction to Reinforcement Learning","volume":"135","author":"Sutton","year":"1998"},{"issue":"1","key":"ref67","first-page":"126","article-title":"Reinforcement learning","volume":"11","author":"Sutton","year":"1999","journal-title":"J. Cognit. Neurosci."},{"key":"ref68","first-page":"2613","article-title":"Double Q-learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"23","author":"Hasselt"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref70","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref71","volume-title":"Grokking Deep Reinforcement Learning","author":"Morales","year":"2020"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref73","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref74","article-title":"Deep reinforcement learning from human preferences","author":"Christiano","year":"2017","journal-title":"arXiv:1706.03741"},{"key":"ref75","article-title":"Fine-tuning language models from human preferences","author":"Ziegler","year":"2019","journal-title":"arXiv:1909.08593"},{"key":"ref76","first-page":"3008","article-title":"Learning to summarize with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Stiennon"},{"key":"ref77","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1017\/9781009089517"},{"key":"ref79","first-page":"1","article-title":"Direct preference optimization: Your language model is secretly a reward model","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Rafailov"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.2307\/2334029"},{"key":"ref81","volume-title":"Gym X-Plane","author":"Lukman","year":"2019"},{"key":"ref82","volume-title":"Gym X-Plane","author":"Alleon","year":"2019"},{"key":"ref83","article-title":"Learning to fly\u2013building an autopilot system based on neural networks and reinforcement learning","author":"Eckstein","year":"2020"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1145\/3458305.3478446"},{"key":"ref85","volume-title":"Fixedwing-Airsim","author":"Quessy","year":"2021"},{"key":"ref86","volume-title":"Gym JSBSim","author":"Rennie","year":"2018"},{"key":"ref87","article-title":"OpenAI gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"key":"ref88","volume-title":"OpenAI Baselines","author":"Dhariwal","year":"2017"},{"key":"ref89","volume-title":"Stable Baselines","author":"Hill","year":"2018"},{"issue":"268","key":"ref90","first-page":"1","article-title":"Stable-baselines3: Reliable reinforcement learning implementations","volume":"22","author":"Raffin","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.2514\/6.2004-4923"},{"key":"ref92","volume-title":"X-Plane","year":"2021"},{"key":"ref93","volume-title":"Gym-Fixed-Wing","author":"B\u00f8hn","year":"2019"},{"key":"ref94","volume-title":"Pyfly\u2014Python Fixed Wing Flight Simulator","year":"2019"},{"key":"ref95","volume-title":"Marko-Pilot","author":"Eckstein","year":"2020"},{"key":"ref96","volume-title":"Qplane Fixed Wing Flight Simulation Environment for Reinforcement Learning","author":"Richter","year":"2021"},{"key":"ref97","volume-title":"JSBsim\u2014An Open Source Flight Dynamics & Control Software Library","author":"Coconnier","year":"2011"},{"key":"ref98","first-page":"1","article-title":"The flightgear flight simulator","volume-title":"Proc. USENIX Annu. Tech. Conf.","volume":"686","author":"Perry"},{"key":"ref99","first-page":"621","article-title":"AirSim: High-fidelity visual and physical simulation for autonomous vehicles","volume-title":"Proc. Field Service Robot.","author":"Shah"},{"key":"ref100","volume-title":"Geofs","author":"Tassin","year":"2010"},{"key":"ref101","volume-title":"Reinforcement learning for optimization of nonlinear and predictive control","author":"B\u00f8hn","year":"2022"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3263430"},{"key":"ref103","volume-title":"On nonlinear and optimization-based control of fixed-wing unmanned aerial vehicles","author":"Reinhardt","year":"2022"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/ICUAS54217.2022.9836064"},{"key":"ref105","volume-title":"Nonlinear attitude and path-following control of fixed-wing aircraft","author":"Coates","year":"2023"},{"key":"ref106","first-page":"1022","article-title":"Automated aircraft recovery via reinforcement learning: Initial experiments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"10","author":"Monaco"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"key":"ref108","article-title":"Multi-player residual advantage learning with general function approximation","author":"Harmon","year":"1996"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2001.932842"},{"key":"ref110","doi-asserted-by":"crossref","first-page":"363","DOI":"10.1007\/11552246_35","article-title":"Autonomous inverted helicopter flight via reinforcement learning","volume-title":"Experimental Robotics IX","author":"Ng","year":"2006"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/7503.003.0006"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1145\/3447928.3456707"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2017.2720851"},{"key":"ref114","article-title":"Machine learning for intelligent control: Application of reinforcement learning techniques to the development of flight control systems for miniature UAV rotorcraft","author":"Hayes","year":"2013"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.1109\/CAC48633.2019.8996970"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.3390\/aerospace8010018"},{"key":"ref117","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.3390\/act11120374"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1109\/AERO53065.2022.9843777"},{"key":"ref120","first-page":"1","article-title":"ROS: An open-source robot operating system","volume-title":"Proc. IEEE Int. Conf. Robot. Autom. (ICRA) Workshop Open Source Robot.","author":"Quigley"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389727"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-98404-5_59"},{"key":"ref123","article-title":"Simulated fixed-wing aircraft attitude control using reinforcement learning methods","author":"Richter","year":"2021"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.1109\/SITIS57111.2022.00102"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-0479-2_212"},{"key":"ref126","doi-asserted-by":"crossref","DOI":"10.1137\/1.9780898719376","volume-title":"L1 Adaptive Control Theory: Guaranteed Robustness With Fast Adaptation","author":"Hovakimyan","year":"2010"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.1061\/(ASCE)AS.1943-5525.0001381"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.1109\/TAES.2024.3351608"},{"key":"ref129","volume-title":"On-Line Q-Learning Using Connectionist Systems","volume":"37","author":"Rummery","year":"1994"},{"key":"ref130","first-page":"1","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Lowe"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1109\/CCDC52312.2021.9602605"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1109\/ICRAS.2018.8443226"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-019-01073-3"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1007\/s40430-023-04570-7"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.1109\/SMC.2015.335"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2015.2509646"},{"key":"ref137","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50035-0"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2013.6580133"},{"key":"ref139","first-page":"64","article-title":"A continuous actor-critic reinforcement learning approach to flocking with fixed-wing UAVs","volume-title":"Proc. Asian Conf. Mach. Learn.","author":"Wang"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2020.103594"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368199"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2009.5178745"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636183"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2021.3094207"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3245124"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.3390\/drones7010028"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.23919\/ChiCC.2019.8866189"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.1109\/ICUAS48674.2020.9213987"},{"key":"ref149","article-title":"Robust auto-landing control of an agile regional jet using fuzzy Q-learning","author":"Zahmatkesh","year":"2023","journal-title":"arXiv:2302.10997"},{"key":"ref150","doi-asserted-by":"publisher","DOI":"10.1109\/FUZZY.1997.622790"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.2514\/6.2021-1282"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.2514\/6.2022-1288"},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.1016\/j.cja.2021.03.029"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3321196"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1016\/j.cja.2020.05.001"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2022.12.035"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1109\/BDAI56143.2022.9862674"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1109\/CAC57257.2022.10055344"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.1016\/j.ast.2022.107623"},{"key":"ref160","doi-asserted-by":"publisher","DOI":"10.3390\/drones7070418"},{"key":"ref161","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-023-08232-6"},{"key":"ref162","volume-title":"Gymnasium","author":"Towers","year":"2023"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10609369.pdf?arnumber=10609369","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,29]],"date-time":"2025-01-29T19:03:44Z","timestamp":1738177424000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10609369\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":162,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3433540","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}