{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,8]],"date-time":"2026-02-08T02:33:42Z","timestamp":1770518022118,"version":"3.49.0"},"reference-count":132,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2021,10,29]],"date-time":"2021-10-29T00:00:00Z","timestamp":1635465600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,10,29]],"date-time":"2021-10-29T00:00:00Z","timestamp":1635465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100000038","name":"Natural Sciences and Engineering Research Council of Canada","doi-asserted-by":"publisher","award":["Discovery Grant 341887"],"award-info":[{"award-number":["Discovery Grant 341887"]}],"id":[{"id":"10.13039\/501100000038","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SN COMPUT. SCI."],"published-print":{"date-parts":[[2022,1]]},"DOI":"10.1007\/s42979-021-00934-9","type":"journal-article","created":{"date-parts":[[2021,10,29]],"date-time":"2021-10-29T17:03:13Z","timestamp":1635526993000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":12,"title":["Extending the Capabilities of Reinforcement Learning Through Curriculum: A Review of Methods and Applications"],"prefix":"10.1007","volume":"3","author":[{"given":"Kashish","family":"Gupta","sequence":"first","affiliation":[]},{"given":"Debasmita","family":"Mukherjee","sequence":"additional","affiliation":[]},{"given":"Homayoun","family":"Najjaran","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,10,29]]},"reference":[{"issue":"6","key":"934_CR1","doi-asserted-by":"publisher","first-page":"981","DOI":"10.1139\/l03-014","volume":"30","author":"B Abdulhai","year":"2003","unstructured":"Abdulhai B, Kattan L. Reinforcement learning: introduction to theory and potential for transport applications. Can J Civ Eng. 2003;30(6):981\u201391. https:\/\/doi.org\/10.1139\/l03-014.","journal-title":"Can J Civ Eng."},{"key":"934_CR2","doi-asserted-by":"publisher","unstructured":"Abdulhai B, Pringle R, Karakoulas GJ. Reinforcement learning for true adaptive traffic signal control. J Transport Eng. 2003;129(3):278\u201385. https:\/\/doi.org\/10.1061\/(ASCE)0733-947X(2003)129:3(278).","DOI":"10.1061\/(ASCE)0733-947X(2003)129:3(278)"},{"key":"934_CR3","unstructured":"Achiam J, Sastry S. Surprise-based intrinsic motivation for deep reinforcement learning. 2017. arXiv:1703.01732."},{"key":"934_CR4","doi-asserted-by":"publisher","unstructured":"Allgower EL, Georg K. Numerical continuation methods, Springer series in computational mathematics, vol 13. Berlin: Springer; 1990. https:\/\/doi.org\/10.1007\/978-3-642-61257-2.","DOI":"10.1007\/978-3-642-61257-2"},{"key":"934_CR5","unstructured":"Andrychowicz M, Wolski F, Ray A, Schneider J, Fong R, Welinder P, McGrew B, Tobin J, Abbeel P, Zaremba W. Hindsight experience replay. Tech. rep. 2017. https:\/\/goo.gl\/SMrQnI."},{"key":"934_CR6","doi-asserted-by":"publisher","unstructured":"Barto AG. Intrinsic motivation and reinforcement learning. In: Intrinsically motivated learning in natural and artificial systems, vol 9783642323751. Berlin: Springer; 2013. p. 17\u201347. https:\/\/doi.org\/10.1007\/978-3-642-32375-1_2.","DOI":"10.1007\/978-3-642-32375-1_2"},{"key":"934_CR7","doi-asserted-by":"publisher","unstructured":"Barto AG, Mahadevan S. Recent advances in hierarchical reinforcement learning. 2003. https:\/\/doi.org\/10.1023\/A:1022140919877.","DOI":"10.1023\/A:1022140919877"},{"key":"934_CR8","unstructured":"Bellemare MG, Srinivasan S, Ostrovski G, Schaul T, Saxton D, Munos R. Unifying count-based exploration and intrinsic motivation. Tech. rep. 2016."},{"key":"934_CR9","doi-asserted-by":"publisher","unstructured":"Bengio Y, Louradour J, Collobert R, Weston J. Curriculum learning. In: ACM international conference proceeding series, vol 382. New York: ACM Press; 2009. p. 1\u20138. https:\/\/doi.org\/10.1145\/1553374.1553380.","DOI":"10.1145\/1553374.1553380"},{"key":"934_CR10","unstructured":"Berseth G, Xie C, Cernek P, Van de Panne M. Progressive reinforcement learning with distillation for multi-skilled motion control. In: 6th international conference on learning representations, ICLR 2018\u2014conference track proceedings. 2018. arXiv:1802.04765."},{"key":"934_CR11","doi-asserted-by":"publisher","first-page":"68","DOI":"10.1016\/s2212-5671(12)00122-0","volume":"3","author":"F Bertoluzzo","year":"2012","unstructured":"Bertoluzzo F, Corazza M. Testing different reinforcement learning configurations for financial trading: introduction and applications. Proc Econ Finance. 2012;3:68\u201377. https:\/\/doi.org\/10.1016\/s2212-5671(12)00122-0.","journal-title":"Proc Econ Finance."},{"key":"934_CR12","doi-asserted-by":"publisher","DOI":"10.3390\/en11102604","author":"A Boodi","year":"2018","unstructured":"Boodi A, Beddiar K, Benamour M, Amirat Y, Benbouzid M. Intelligent systems for building energy and occupant comfort optimization: a state of the art review and recommendations. Energies. 2018. https:\/\/doi.org\/10.3390\/en11102604.","journal-title":"Energies."},{"key":"934_CR13","doi-asserted-by":"publisher","DOI":"10.1016\/j.conb.2012.05.008","author":"MM Botvinick","year":"2012","unstructured":"Botvinick MM. Hierarchical reinforcement learning and decision making. Curr Opin Neurobiol. 2012. https:\/\/doi.org\/10.1016\/j.conb.2012.05.008.","journal-title":"Curr Opin Neurobiol."},{"key":"934_CR14","unstructured":"Boyan JA, Moore AW. Generalization in reinforcement learning: safely approximating the value function. Tech. rep."},{"key":"934_CR15","unstructured":"Brockman G, Cheung V, Pettersson L, Schneider J, Schulman J, Tang J, Zaremba W. OpenAI Gym. 2016."},{"key":"934_CR16","doi-asserted-by":"publisher","unstructured":"Bu X, Rao J, Xu CZ. A reinforcement learning approach to online web systems auto-configuration. In: Proceedings\u2014international conference on distributed computing systems. 2009. p. 2\u201311. https:\/\/doi.org\/10.1109\/ICDCS.2009.76.","DOI":"10.1109\/ICDCS.2009.76"},{"key":"934_CR17","unstructured":"Burda Y, Edwards H, Pathak D, Storkey A, Darrell T, Efros AA. Large-scale study of curiosity-driven learning. In: 7th international conference on learning representations, ICLR 2019. 2018. arXiv:1808.04355."},{"key":"934_CR18","doi-asserted-by":"publisher","unstructured":"Chebotar Y, Handa A, Makoviychuk V, Macklin M, Issac J, Ratliff N, Fox D. Closing the sim-to-real loop: adapting simulation randomization with real world experience. In: Proceedings\u2014IEEE international conference on robotics and automation, vol 2019-May. Institute of Electrical and Electronics Engineers Inc.; 2019. p. 8973\u20138979. https:\/\/doi.org\/10.1109\/ICRA.2019.8793789.","DOI":"10.1109\/ICRA.2019.8793789"},{"key":"934_CR19","doi-asserted-by":"publisher","first-page":"144","DOI":"10.1016\/j.neucom.2018.07.061","volume":"316","author":"X Chen","year":"2018","unstructured":"Chen X, Chen Y, Gupta K, Zhou J, Najjaran H. SliceNet: a proficient model for real-time 3D shape-based recognition. Neurocomputing. 2018;316:144\u201355.","journal-title":"Neurocomputing."},{"key":"934_CR20","doi-asserted-by":"crossref","unstructured":"Clement B, Roy D, Oudeyer PY, Lopes M. Multi-armed bandits for intelligent tutoring systems. 2013. arXiv:1310.3174.","DOI":"10.1109\/DEVLRN.2014.6983019"},{"key":"934_CR21","unstructured":"Czarnecki WM, Jayakumar SM, Jaderberg M, Hasenclever L, Teh YW, Osindero S, Heess N, Pascanu R. Mix & match\u2014agent curricula for reinforcement learning. In: 35th international conference on machine learning, ICML 2018, vol 3. International Machine Learning Society (IMLS); 2018. p. 1761\u201373."},{"issue":"7","key":"934_CR22","doi-asserted-by":"publisher","first-page":"2686","DOI":"10.1016\/j.buildenv.2006.07.010","volume":"42","author":"K Dalamagkidis","year":"2007","unstructured":"Dalamagkidis K, Kolokotsa D, Kalaitzakis K, Stavrakakis GS. Reinforcement learning for energy conservation and comfort in buildings. Build Environ. 2007;42(7):2686\u201398. https:\/\/doi.org\/10.1016\/j.buildenv.2006.07.010.","journal-title":"Build Environ."},{"key":"934_CR23","doi-asserted-by":"publisher","unstructured":"Deisenroth MP, Englert P, Peters J, Fox D. Multi-task policy search for robotics. In: Proceedings\u2014IEEE international conference on robotics and automation. Institute of Electrical and Electronics Engineers Inc.; 2014. p. 3876\u201381. https:\/\/doi.org\/10.1109\/ICRA.2014.6907421.","DOI":"10.1109\/ICRA.2014.6907421"},{"key":"934_CR24","unstructured":"Duan Y, Chen X, Houthooft R, Schulman J, Abbeel P. Benchmarking deep reinforcement learning for continuous control. Tech. rep. 2016. https:\/\/github.com\/."},{"issue":"1","key":"934_CR25","doi-asserted-by":"publisher","first-page":"71","DOI":"10.1016\/0010-0277(93)90058-4","volume":"48","author":"JL Elman","year":"1993","unstructured":"Elman JL. Learning and development in neural networks: the importance of starting small. Cognition. 1993;48(1):71\u201399. https:\/\/doi.org\/10.1016\/0010-0277(93)90058-4.","journal-title":"Cognition."},{"key":"934_CR26","doi-asserted-by":"publisher","unstructured":"Eppe M, Magg S, Wermter S. Curriculum goal masking for continuous deep reinforcement learning. In: 2019 Joint IEEE 9th international conference on development and learning and epigenetic robotics, ICDL-EpiRob 2019. Institute of Electrical and Electronics Engineers Inc.; 2019. p. 183\u201388. https:\/\/doi.org\/10.1109\/DEVLRN.2019.8850721.","DOI":"10.1109\/DEVLRN.2019.8850721"},{"key":"934_CR27","unstructured":"Fang M, Zhou T, Du Y, Han L, Zhang Z. Curriculum-guided hindsight experience replay. In: Advances in neural information processing systems, vol 32. 2019. p. 12623\u2013634. https:\/\/github.com\/mengf1\/CHER."},{"key":"934_CR28","doi-asserted-by":"publisher","unstructured":"Ferro N, Maistro M, Lucchese C, Perego R. Continuation methods and curriculum learning for learning to rank. In: International conference on information and knowledge management, proceedings. New York: Association for Computing Machinery; 2018. p. 1523\u201326. https:\/\/doi.org\/10.1145\/3269206.3269239.","DOI":"10.1145\/3269206.3269239"},{"key":"934_CR29","unstructured":"Florensa C, Held D, Geng X, Abbeel P. Automatic goal generation for reinforcement learning agents. In: 35th international conference on machine learning, ICML 2018, vol 4. 2017. p. 2458\u201371. arXiv:1705.06366."},{"key":"934_CR30","unstructured":"Florensa C, Held D, Wulfmeier M, Zhang M, Abbeel P. Reverse curriculum generation for reinforcement learning. 2017. arXiv:1707.05300."},{"key":"934_CR31","unstructured":"Forestier S, Mollard Y, Oudeyer PY. Intrinsically motivated goal exploration processes with automatic curriculum learning. 2017. arXiv:1708.02190."},{"key":"934_CR32","unstructured":"Fournier P, Sigaud O, Chetouani M, Oudeyer PY. Accuracy-based curriculum learning in deep reinforcement learning. 2018. arXiv:1806.09614."},{"key":"934_CR33","doi-asserted-by":"publisher","unstructured":"Frank M, Leitner J, Stollenga M, F\u00f6rster A, Schmidhuber J. Curiosity driven reinforcement learning for motion planning on humanoids. Front Neurorobot. 2014;7:25. https:\/\/doi.org\/10.3389\/fnbot.2013.00025.","DOI":"10.3389\/fnbot.2013.00025"},{"key":"934_CR34","unstructured":"Fu J, Luo K, Levine S. Learning robust rewards with adversarial inverse reinforcement learning. In: 6th international conference on learning representations, ICLR 2018\u2014conference track proceedings. 2017. arXiv:1710.11248."},{"issue":"7","key":"934_CR35","doi-asserted-by":"publisher","first-page":"3249","DOI":"10.1109\/TIP.2016.2563981","volume":"25","author":"C Gong","year":"2016","unstructured":"Gong C, Tao D, Maybank SJ, Liu W, Kang G, Yang J. Multi-modal curriculum learning for semi-supervised image classification. IEEE Trans Image Process. 2016;25(7):3249\u201360. https:\/\/doi.org\/10.1109\/TIP.2016.2563981.","journal-title":"IEEE Trans Image Process."},{"key":"934_CR36","doi-asserted-by":"publisher","unstructured":"Goodfellow IJ, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S, Courville A, Bengio Y. Generative adversarial nets. In: Advances in neural information processing systems, vol 3. Neural information processing systems foundation; 2014. p. 2672\u201380. https:\/\/doi.org\/10.3156\/jsoft.29.5_177_2.","DOI":"10.3156\/jsoft.29.5_177_2"},{"key":"934_CR37","unstructured":"Gosavi A. Reinforcement learning: a tutorial survey and recent advances. Tech. rep."},{"key":"934_CR38","unstructured":"Graves A, Bellemare MG, Menick J, Munos R, Kavukcuoglu K. Automated curriculum learning for neural networks. In: 34th international conference on machine learning, ICML 2017, vol 3. 2017. p. 2120\u201329. arXiv:1704.03003."},{"key":"934_CR39","doi-asserted-by":"crossref","unstructured":"Guo S, Huang W, Zhang H, Zhuang C, Dong D, Scott MR, Huang D. CurriculumNet: weakly supervised learning from large-scale web images. In: Proceedings of the European conference on computer vision (ECCV). 2018. p. 135\u201350.","DOI":"10.1007\/978-3-030-01249-6_9"},{"key":"934_CR40","unstructured":"Guo X, Singh S, Lee H, Lewis R, Wang X. Deep learning for real-time Atari game play using offline Monte-Carlo tree search planning. Tech. rep. 2014."},{"key":"934_CR41","doi-asserted-by":"publisher","unstructured":"Gupta JK, Egorov M, Kochenderfer M. Cooperative multi-agent control using deep reinforcement learning. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), vol 10642 LNAI. Springer; 2017. p. 66\u201383. https:\/\/doi.org\/10.1007\/978-3-319-71682-4_5.","DOI":"10.1007\/978-3-319-71682-4_5"},{"issue":"1","key":"934_CR42","volume":"6","author":"K Gupta","year":"2021","unstructured":"Gupta K, Najjaran H. Curriculum-based deep reinforcement learning for adaptive robotics: a mini-review. Int J Robot Eng. 2021;6(1):102231.","journal-title":"Int J Robot Eng."},{"key":"934_CR43","unstructured":"Haarnoja T, Zhou A, Hartikainen K, Tucker G, Ha S, Tan J, Kumar V, Zhu H, Gupta A, Abbeel P, Levine S. Soft actor-critic algorithms and applications. 2018. arXiv:1812.05905."},{"key":"934_CR44","doi-asserted-by":"publisher","unstructured":"Han M, May R, Zhang X, Wang X, Pan S, Yan D, Jin Y, Xu L. A review of reinforcement learning methodologies for controlling occupant comfort in buildings. 2019. https:\/\/doi.org\/10.1016\/j.scs.2019.101748.","DOI":"10.1016\/j.scs.2019.101748"},{"key":"934_CR45","unstructured":"Heess N, TB D, Sriram S, Lemmon J, Merel J, Wayne G, Tassa Y, Erez T, Wang Z, Eslami SMA, Riedmiller M, Silver D. Emergence of locomotion behaviours in rich environments. 2017. arXiv:1707.02286."},{"key":"934_CR46","doi-asserted-by":"crossref","unstructured":"Henderson P, Islam R, Bachman P, Pineau J, Precup D, Meger D. Deep reinforcement learning that matters. Tech. rep. 2018. www.aaai.org.","DOI":"10.1609\/aaai.v32i1.11694"},{"key":"934_CR47","doi-asserted-by":"crossref","unstructured":"Ie E, Jain V, Wang J, Narvekar S, Agarwal R, Wu R, Cheng HT, Chandra T, Boutilier C. SlateQ: a tractable decomposition for reinforcement learning with recommendation sets. 2019.","DOI":"10.24963\/ijcai.2019\/360"},{"key":"934_CR48","unstructured":"Ioffe S, Szegedy C. Batch normalization: accelerating deep network training by reducing internal covariate shift. In: 32nd international conference on machine learning, ICML 2015, vol 1. International Machine Learning Society (IMLS); 2015. p. 448\u201356."},{"key":"934_CR49","doi-asserted-by":"publisher","unstructured":"Ivanovic B, Harrison J, Sharma A, Chen M, Pavone M. BaRC: backward reachability curriculum for robotic reinforcement learning. In: Proceedings\u2014IEEE international conference on robotics and automation, vol 2019-May. Institute of Electrical and Electronics Engineers Inc.; 2019. p. 15\u201321. https:\/\/doi.org\/10.1109\/ICRA.2019.8794206.","DOI":"10.1109\/ICRA.2019.8794206"},{"key":"934_CR50","unstructured":"Jaderberg M, Mnih V, Czarnecki WM, Schaul T, Leibo JZ, Silver D, Kavukcuoglu K. Reinforcement learning with unsupervised auxiliary tasks. In: 5th international conference on learning representations, ICLR 2017\u2014conference track proceedings. 2016. arXiv:1611.05397."},{"key":"934_CR51","doi-asserted-by":"publisher","unstructured":"Jiang Z, Liang J. Cryptocurrency portfolio management with deep reinforcement learning. In: 2017 intelligent systems conference, IntelliSys 2017, vol 2018-January. Institute of Electrical and Electronics Engineers Inc.; 2018. p. 905\u2013913. https:\/\/doi.org\/10.1109\/IntelliSys.2017.8324237.","DOI":"10.1109\/IntelliSys.2017.8324237"},{"key":"934_CR52","doi-asserted-by":"crossref","unstructured":"Jin J, Song C, Li H, Gai K, Wang J, Zhang W. Real-time bidding with multi-agent reinforcement learning in display advertising. Tech. rep. 2018.","DOI":"10.1145\/3269206.3272021"},{"key":"934_CR53","doi-asserted-by":"publisher","unstructured":"Justesen N, Risi S. Automated curriculum learning by rewarding temporally rare events. In: IEEE conference on computational intelligence and games, CIG, vol 2018-August. IEEE Computer Society; 2018. https:\/\/doi.org\/10.1109\/CIG.2018.8490448.","DOI":"10.1109\/CIG.2018.8490448"},{"key":"934_CR54","unstructured":"Kaiser L, Babaeizadeh M, Milos P, Osinski B, Campbell RH, Czechowski K, Erhan D, Finn C, Kozakowski P, Levine S, Mohiuddin A, Sepassi R, Tucker G, Michalewski H. Model-based reinforcement learning for Atari. 2019. arXiv:1903.00374."},{"key":"934_CR55","doi-asserted-by":"publisher","unstructured":"Kappen HJ. An introduction to stochastic control theory, path integrals and reinforcement learning. In: AIP conference proceedings, vol 887. AIP; 2007. p. 149\u201381. https:\/\/doi.org\/10.1063\/1.2709596.","DOI":"10.1063\/1.2709596"},{"key":"934_CR56","doi-asserted-by":"publisher","unstructured":"Karatzoglou A, Baltrunas L, Shi Y. Learning to rank for recommender systems. In: RecSys 2013\u2014proceedings of the 7th ACM conference on recommender systems. New York: ACM Press; 2013. p. 493\u201394. https:\/\/doi.org\/10.1145\/2507157.2508063.","DOI":"10.1145\/2507157.2508063"},{"key":"934_CR57","doi-asserted-by":"publisher","unstructured":"Karpathy A, Van De Panne M. Curriculum learning for motor skills. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics), vol 7310 LNAI. Berlin: Springer; 2012. p. 325\u201330. https:\/\/doi.org\/10.1007\/978-3-642-30353-1_31.","DOI":"10.1007\/978-3-642-30353-1_31"},{"key":"934_CR58","doi-asserted-by":"publisher","unstructured":"Kempka M, Wydmuch M, Runc G, Toczek J, Jaskowski W. ViZDoom: a Doom-based AI research platform for visual reinforcement learning. In: IEEE conference on computational intelligence and games, CIG. IEEE Computer Society; 2016. https:\/\/doi.org\/10.1109\/CIG.2016.7860433.","DOI":"10.1109\/CIG.2016.7860433"},{"issue":"1","key":"934_CR59","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1016\/j.arcontrol.2012.03.004","volume":"36","author":"SG Khan","year":"2012","unstructured":"Khan SG, Herrmann G, Lewis FL, Pipe T, Melhuish C. Reinforcement learning and optimal adaptive control: an overview and implementation examples. Annu Rev Control. 2012;36(1):42\u201359. https:\/\/doi.org\/10.1016\/j.arcontrol.2012.03.004.","journal-title":"Annu Rev Control."},{"issue":"11","key":"934_CR60","doi-asserted-by":"publisher","first-page":"1238","DOI":"10.1177\/0278364913495721","volume":"32","author":"J Kober","year":"2013","unstructured":"Kober J, Bagnell JA, Peters J. Reinforcement learning in robotics: a survey. Int J Robot Res. 2013;32(11):1238\u201374.","journal-title":"Int J Robot Res."},{"key":"934_CR61","unstructured":"Kong X, Xin B, Liu F, Wang Y. Revisiting the master-slave architecture in multi-agent deep reinforcement learning. 2017. arXiv:1712.07305."},{"key":"934_CR62","unstructured":"Konidaris G, Barto A. Skill discovery in continuous reinforcement learning domains using skill chaining. Tech. rep. 2009."},{"issue":"3","key":"934_CR63","doi-asserted-by":"publisher","first-page":"122","DOI":"10.3390\/robotics2030122","volume":"2","author":"P Kormushev","year":"2013","unstructured":"Kormushev P, Calinon S, Caldwell D. Reinforcement learning in robotics: applications and real-world challenges. Robotics. 2013;2(3):122\u201348.","journal-title":"Robotics."},{"key":"934_CR64","doi-asserted-by":"publisher","first-page":"133","DOI":"10.1016\/j.energy.2013.05.060","volume":"59","author":"E Kuznetsova","year":"2013","unstructured":"Kuznetsova E, Li YF, Ruiz C, Zio E, Ault G, Bell K. Reinforcement learning for microgrid energy management. Energy. 2013;59:133\u201346. https:\/\/doi.org\/10.1016\/j.energy.2013.05.060.","journal-title":"Energy."},{"key":"934_CR65","unstructured":"Lakshminarayanan AS, Krishnamurthy R, Kumar P, Ravindran B. Option discovery in hierarchical reinforcement learning using spatio-temporal clustering. 2016. arXiv:1605.05359."},{"key":"934_CR66","unstructured":"Leno Da Silva F, Glatt R, Reali Costa AH. Simultaneously learning and advising in multiagent reinforcement learning. Tech. rep. www.ifaamas.org."},{"key":"934_CR67","unstructured":"Leno Da Silva F, Reali Costa AH. Object-oriented curriculum generation for reinforcement learning. In: Proceedings of the 17th international conference on autonomous agents and multiagent systems, Stockholm, Sweden. 2018. p. 1026\u201334."},{"issue":"3","key":"934_CR68","doi-asserted-by":"publisher","first-page":"247","DOI":"10.1109\/JAS.2016.7508798","volume":"3","author":"L Li","year":"2016","unstructured":"Li L, Lv Y, Wang FY. Traffic signal timing via deep reinforcement learning. IEEE\/CAA J Autom Sin. 2016;3(3):247\u201354. https:\/\/doi.org\/10.1109\/JAS.2016.7508798.","journal-title":"IEEE\/CAA J Autom Sin."},{"key":"934_CR69","doi-asserted-by":"crossref","unstructured":"Li Y. Reinforcement learning applications. 2019. arXiv:1908.06973.","DOI":"10.1201\/9781351006620-3"},{"key":"934_CR70","unstructured":"Liang Y, Machado MC, Talvitie E, Bowling M. State of the art control of Atari games using shallow reinforcement learning. In: Proceedings of the international joint conference on autonomous agents and multiagent systems, AAMAS. 2015. p. 485\u201393. arXiv:1512.01563."},{"key":"934_CR71","unstructured":"Lillicrap TP, Hunt JJ, Pritzel A, Heess N, Erez T, Tassa Y, Silver D, Wierstra D. Continuous control with deep reinforcement learning. In: 4th international conference on learning representations, ICLR 2016\u2014conference track proceedings. 2016."},{"key":"934_CR72","unstructured":"Liu H, Trott A, Socher R, Xiong C. Competitive experience replay. In: 7th international conference on learning representations, ICLR 2019. 2019. arXiv:1902.00528."},{"issue":"4","key":"934_CR73","doi-asserted-by":"publisher","first-page":"1497","DOI":"10.1109\/TMECH.2017.2707338","volume":"22","author":"T Liu","year":"2017","unstructured":"Liu T, Hu X, Li SE, Cao D. Reinforcement learning optimized look-ahead energy management of a parallel hybrid electric vehicle. IEEE\/ASME Trans Mechatron. 2017;22(4):1497\u2013507. https:\/\/doi.org\/10.1109\/TMECH.2017.2707338.","journal-title":"IEEE\/ASME Trans Mechatron."},{"key":"934_CR74","doi-asserted-by":"publisher","unstructured":"Lopes M, Oudeyer PY. The strategic student approach for life-long exploration and learning. In: 2012 IEEE international conference on development and learning and epigenetic robotics, ICDL 2012. 2012. https:\/\/doi.org\/10.1109\/DevLrn.2012.6400807.","DOI":"10.1109\/DevLrn.2012.6400807"},{"key":"934_CR75","doi-asserted-by":"publisher","unstructured":"Mahadevan S, Connell J. Scaling reinforcement learning to robotics by exploiting the subsumption architecture. In: Machine learning proceedings 1991. Elsevier; 1991. p. 328\u201332. https:\/\/doi.org\/10.1016\/b978-1-55860-200-7.50068-4.","DOI":"10.1016\/b978-1-55860-200-7.50068-4"},{"key":"934_CR76","doi-asserted-by":"publisher","unstructured":"Mahmood T, Ricci F. Learning and adaptivity in interactive recommender systems. In: ACM international conference proceeding series, vol 258. New York: ACM Press; 2007. p. 75\u201384. https:\/\/doi.org\/10.1145\/1282100.1282114.","DOI":"10.1145\/1282100.1282114"},{"key":"934_CR77","doi-asserted-by":"publisher","unstructured":"Mahmood T, Ricci F. Improving recommender systems with adaptive conversational strategies. In: Proceedings of the 20th ACM conference on hypertext and hypermedia, HT\u201909. New York: ACM Press; 2009. p. 73\u201382. https:\/\/doi.org\/10.1145\/1557914.1557930.","DOI":"10.1145\/1557914.1557930"},{"key":"934_CR78","unstructured":"Matiisen T, Oliver A, Cohen T, Schulman J. Teacher-student curriculum learning. IEEE Trans Neural Netw Learn Syst. 2017. arXiv:1707.00183."},{"key":"934_CR79","unstructured":"Mirowski P, Pascanu R, Viola F, Soyer H, Ballard AJ, Banino A, Denil M, Goroshin R, Sifre L, Kavukcuoglu K, Kumaran D, Hadsell R. Learning to navigate in complex environments. In: 5th international conference on learning representations, ICLR 2017\u2014conference track proceedings. 2017."},{"key":"934_CR80","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Graves A, Antonoglou I, Wierstra D, Riedmiller M. Playing Atari with deep reinforcement learning. 2013. arXiv:1312.5602."},{"key":"934_CR81","unstructured":"Mnih V, Puigdom\u00e8nech Badia A, Mirza M, Graves A, Harley T, Lillicrap TP, Silver D, Kavukcuoglu K. Asynchronous methods for deep reinforcement learning. Tech. rep. 2016."},{"key":"934_CR82","doi-asserted-by":"publisher","DOI":"10.1016\/j.rcim.2021.102231","volume":"73","author":"D Mukherjee","year":"2022","unstructured":"Mukherjee D, Gupta K, Chang LH, Najjaran H. A survey of robot learning strategies for human-robot collaboration in industrial settings. Robot Comput Integr Manuf. 2022;73:102231.","journal-title":"Robot Comput Integr Manuf."},{"key":"934_CR83","doi-asserted-by":"publisher","unstructured":"Nevmyvaka Y, Yi F, Kearns M. Reinforcement learning for optimized trade execution. In: ACM international conference proceeding series, vol 148. New York: ACM Press; 2006. p. 673\u201380. https:\/\/doi.org\/10.1145\/1143844.1143929. http:\/\/portal.acm.org\/citation.cfm?doid=1143844.1143929.","DOI":"10.1145\/1143844.1143929"},{"issue":"3","key":"934_CR84","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1016\/j.jmp.2008.12.005","volume":"53","author":"Y Niv","year":"2009","unstructured":"Niv Y. Reinforcement learning in the brain. J Math Psychol. 2009;53(3):139\u201354. https:\/\/doi.org\/10.1016\/j.jmp.2008.12.005.","journal-title":"J Math Psychol."},{"issue":"1","key":"934_CR85","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s13321-017-0235-x","volume":"9","author":"M Olivecrona","year":"2017","unstructured":"Olivecrona M, Blaschke T, Engkvist O, Chen H. Molecular de-novo design through deep reinforcement learning. J Cheminform. 2017;9(1):1\u201314. https:\/\/doi.org\/10.1186\/s13321-017-0235-x.","journal-title":"J Cheminform."},{"key":"934_CR86","doi-asserted-by":"publisher","unstructured":"O\u2019Neill D, Levorato M, Goldsmith A, Mitra U. Residential demand response using reinforcement learning. Institute of Electrical and Electronics Engineers (IEEE); 2010. p. 409\u201314. https:\/\/doi.org\/10.1109\/smartgrid.2010.5622078.","DOI":"10.1109\/smartgrid.2010.5622078"},{"key":"934_CR87","unstructured":"Parisotto E, Salakhutdinov R. Neural map: structured memory for deep reinforcement learning. In: 6th international conference on learning representations, ICLR 2018\u2014conference track proceedings. 2017. arXiv:1702.08360."},{"key":"934_CR88","doi-asserted-by":"publisher","unstructured":"Peng XB, Andrychowicz M, Zaremba W, Abbeel P. Sim-to-real transfer of robotic control with dynamics randomization. In: Proceedings\u2014IEEE international conference on robotics and automation. Institute of Electrical and Electronics Engineers Inc.; 2018. p. 3803\u201310. https:\/\/doi.org\/10.1109\/ICRA.2018.8460528.","DOI":"10.1109\/ICRA.2018.8460528"},{"issue":"7\u20139","key":"934_CR89","doi-asserted-by":"publisher","first-page":"1180","DOI":"10.1016\/j.neucom.2007.11.026","volume":"71","author":"J Peters","year":"2008","unstructured":"Peters J, Schaal S. Natural actor-critic. Neurocomputing. 2008;71(7\u20139):1180\u201390. https:\/\/doi.org\/10.1016\/j.neucom.2007.11.026.","journal-title":"Neurocomputing."},{"key":"934_CR90","unstructured":"Plappert M, Andrychowicz M, Ray A, McGrew B, Baker B, Powell G, Schneider J, Tobin J, Chociej M, Welinder P, Kumar V, Zaremba W. Multi-goal reinforcement learning: challenging robotics environments and request for research. 2018. arXiv:1802.09464."},{"key":"934_CR91","doi-asserted-by":"publisher","unstructured":"Popova M, Isayev O, Tropsha A. Deep reinforcement learning for de novo drug design. Sci Adv. 2018;4(7):eaap7885. https:\/\/doi.org\/10.1126\/sciadv.aap7885.","DOI":"10.1126\/sciadv.aap7885"},{"key":"934_CR92","unstructured":"Portelas R, Colas C, Hofmann K, Oudeyer PY. Teacher algorithms for curriculum learning of Deep RL in continuously parameterized environments. 2019. arXiv:1910.07224."},{"issue":"7383","key":"934_CR93","doi-asserted-by":"publisher","first-page":"268","DOI":"10.1136\/bmj.326.7383.268","volume":"326","author":"D Prideaux","year":"2003","unstructured":"Prideaux D. Curriculum design. BMJ. 2003;326(7383):268. https:\/\/doi.org\/10.1136\/bmj.326.7383.268.","journal-title":"BMJ."},{"key":"934_CR94","doi-asserted-by":"publisher","unstructured":"Qiao Z, Muelling K, Dolan JM, Palanisamy P, Mudalige P. Automatically generated curriculum based reinforcement learning for autonomous vehicles in urban environment. In: IEEE intelligent vehicles symposium, proceedings, vol 2018-June. Institute of Electrical and Electronics Engineers Inc.; 2018. p. 1233\u201338. https:\/\/doi.org\/10.1109\/IVS.2018.8500603.","DOI":"10.1109\/IVS.2018.8500603"},{"key":"934_CR95","doi-asserted-by":"publisher","unstructured":"Ranasinghe N, Shen WM. Surprise-based learning for developmental robotics. In: Proceedings of the 2008 ECSIS symposium on learning and adaptive behaviors for robotic systems, LAB-RS 2008. 2008. p. 65\u201370. https:\/\/doi.org\/10.1109\/LAB-RS.2008.18.","DOI":"10.1109\/LAB-RS.2008.18"},{"issue":"6","key":"934_CR96","doi-asserted-by":"publisher","first-page":"2216","DOI":"10.1109\/TNNLS.2018.2790981","volume":"29","author":"Z Ren","year":"2018","unstructured":"Ren Z, Dong D, Li H, Chen C. Self-paced prioritized curriculum learning with coverage penalty in deep reinforcement learning. IEEE Trans Neural Netw Learn Syst. 2018;29(6):2216\u201326. https:\/\/doi.org\/10.1109\/TNNLS.2018.2790981.","journal-title":"IEEE Trans Neural Netw Learn Syst."},{"key":"934_CR97","unstructured":"Rosenfeld A, Taylor ME, Kraus S. Speeding up tabular reinforcement learning using state-action similarities. Tech. rep. www.ifaamas.org."},{"key":"934_CR98","unstructured":"Rusu AA, Vecerik M, Roth\u00f6rl T, Heess N, Pascanu R, Hadsell R. Sim-to-real robot learning from pixels with progressive nets. 2016. arXiv:1610.04286."},{"key":"934_CR99","doi-asserted-by":"publisher","unstructured":"Saito A. Curriculum learning based on reward sparseness for deep reinforcement learning of task completion dialogue management. 2018. p. 46\u201351. https:\/\/doi.org\/10.18653\/V1\/W18-5707.","DOI":"10.18653\/V1\/W18-5707"},{"key":"934_CR100","unstructured":"Schaul T, Quan J, Antonoglou I, Silver D. Prioritized experience replay. In: 4th international conference on learning representations, ICLR 2016\u2014conference track proceedings."},{"key":"934_CR101","doi-asserted-by":"publisher","unstructured":"Shantia A, Begue E, Wiering M. Connectionist reinforcement learning for intelligent unit micro management in StarCraft. In: Proceedings of the international joint conference on neural networks. 2011. p. 1794\u20131801. https:\/\/doi.org\/10.1109\/IJCNN.2011.6033442.","DOI":"10.1109\/IJCNN.2011.6033442"},{"issue":"1","key":"934_CR102","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1109\/tetci.2018.2823329","volume":"3","author":"K Shao","year":"2018","unstructured":"Shao K, Zhu Y, Zhao D. StarCraft micromanagement with reinforcement learning and curriculum transfer learning. IEEE Trans Emerg Top Comput Intell. 2018;3(1):73\u201384. https:\/\/doi.org\/10.1109\/tetci.2018.2823329.","journal-title":"IEEE Trans Emerg Top Comput Intell."},{"key":"934_CR103","unstructured":"Silver D, Heess N, Degris T, Wierstra D, Riedmiller M. Deterministic policy gradient algorithms. Tech. rep."},{"issue":"6419","key":"934_CR104","doi-asserted-by":"publisher","first-page":"1140","DOI":"10.1126\/science.aar6404","volume":"362","author":"D Silver","year":"2018","unstructured":"Silver D, Hubert T, Schrittwieser J, Antonoglou I, Lai M, Guez A, Lanctot M, Sifre L, Kumaran D, Graepel T, Lillicrap T, Simonyan K, Hassabis D. A general reinforcement learning algorithm that masters chess, shogi, and Go through self-play. Science. 2018;362(6419):1140\u20134. https:\/\/doi.org\/10.1126\/science.aar6404.","journal-title":"Science."},{"issue":"3","key":"934_CR105","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1007\/s12064-011-0142-z","volume":"131","author":"S Still","year":"2012","unstructured":"Still S, Precup D. An information-theoretic approach to curiosity-driven reinforcement learning. Theory Biosci. 2012;131(3):139\u201348. https:\/\/doi.org\/10.1007\/s12064-011-0142-z.","journal-title":"Theory Biosci."},{"key":"934_CR106","unstructured":"Sukhbaatar S, Lin Z, Kostrikov I, Synnaeve G, Szlam A, Fergus R. Intrinsic motivation and automatic curricula via asymmetric self-play. In: 6th international conference on learning representations, ICLR 2018\u2014conference track proceedings. 2017. arXiv:1703.05407."},{"key":"934_CR107","unstructured":"Sukhbaatar S, Szlam A, Synnaeve G, Chintala S, Fergus R. MazeBase: a sandbox for learning from games. 2015. arXiv:1511.07401."},{"key":"934_CR108","doi-asserted-by":"publisher","unstructured":"Sutton RS, Barto AG. Reinforcement learning: an introduction (second edition). 2018. https:\/\/doi.org\/10.1007\/978-3-540-29678-2_6199.","DOI":"10.1007\/978-3-540-29678-2_6199"},{"issue":"1","key":"934_CR109","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1016\/S0004-3702(99)00052-1","volume":"112","author":"RS Sutton","year":"1999","unstructured":"Sutton RS, Precup D, Singh S. Between MDPs and semi-MDPs: a framework for temporal abstraction in reinforcement learning. Artif Intell. 1999;112(1):181\u2013211. https:\/\/doi.org\/10.1016\/S0004-3702(99)00052-1.","journal-title":"Artif Intell."},{"key":"934_CR110","unstructured":"Svetlik M, Leonetti M, Sinapov J, Shah R, Walker N, Stone P. Automatic curriculum graph generation for reinforcement learning agents. Tech. rep. www.aaai.org."},{"key":"934_CR111","doi-asserted-by":"publisher","unstructured":"Szepesv\u00e1ri C. Algorithms for reinforcement learning. In: Synthesis lectures on artificial intelligence and machine learning, vol 9. Morgan & Claypool Publishers; 2010. p. 1\u201389. https:\/\/doi.org\/10.2200\/S00268ED1V01Y201005AIM009.","DOI":"10.2200\/S00268ED1V01Y201005AIM009"},{"key":"934_CR112","doi-asserted-by":"publisher","unstructured":"Taghipour N, Kardan A, Ghidary SS. Usage-based web recommendations: a reinforcement learning approach. In: RecSys\u201907: proceedings of the 2007 ACM conference on recommender systems. New York: ACM Press; 2007. p. 113\u201320. https:\/\/doi.org\/10.1145\/1297231.1297250.","DOI":"10.1145\/1297231.1297250"},{"key":"934_CR113","doi-asserted-by":"crossref","unstructured":"Tan J, Zhang T, Coumans E, Iscen A, Bai Y, Hafner D, Bohez S, Vanhoucke V. Sim-to-real: learning agile locomotion for quadruped robots. 2018. arXiv:1804.10332.","DOI":"10.15607\/RSS.2018.XIV.010"},{"key":"934_CR114","doi-asserted-by":"crossref","unstructured":"Tavares A, Chaimowicz L. Tabular reinforcement learning in real-time strategy games via options. In: IEEE conference on computational intelligence and games, CIG. 2018. https:\/\/ieeexplore.ieee.org\/abstract\/document\/8490427\/.","DOI":"10.1109\/CIG.2018.8490427"},{"key":"934_CR115","unstructured":"Torrey L, Taylor ME. Teaching on a budget: agents advising agents in reinforcement learning. In: International conference on autonomous agents and multiagent systems (AAMAS). 2013."},{"key":"934_CR116","unstructured":"Tu K, Honavar V. On the utility of curricula in unsupervised learning of probabilistic grammars. In: Twenty-second international joint conference on artificial intelligence. 2011."},{"key":"934_CR117","unstructured":"Van Der Linden R, Lopes R, Bidarra R. Designing procedurally generated levels. Tech. rep. 2013. www.aaai.org."},{"key":"934_CR118","unstructured":"Van Hasselt H. Double Q-learning. Tech. rep. 2010."},{"key":"934_CR119","doi-asserted-by":"crossref","unstructured":"Van Hasselt H, Guez A, Silver D. Deep reinforcement learning with double Q-learning. Tech. rep. 2016. www.aaai.org.","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"934_CR120","unstructured":"Vecerik M, Hester T, Scholz J, Wang F, Pietquin O, Piot B, Heess N, Roth\u00f6rl T, Lampe T, Riedmiller M. Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards. 2017. arXiv:1707.08817."},{"key":"934_CR121","unstructured":"Vezhnevets AS, Osindero S, Schaul T, Heess N, Jaderberg M, Silver D, Kavukcuoglu K. FeUdal networks for hierarchical reinforcement learning. In: 34th international conference on machine learning, ICML 2017, vol 7. 2017. p. 5409\u201318. arXiv:1703.01161."},{"key":"934_CR122","unstructured":"Vinyals O, Ewalds T, Bartunov S, Georgiev P, Vezhnevets AS, Yeo M, Makhzani A, K\u00fcttler H, Agapiou J, Schrittwieser J, Quan J, Gaffney S, Petersen S, Simonyan K, Schaul T, van Hasselt H, Silver D, Lillicrap T, Calderone K, Keet P, Brunasso A, Lawrence D, Ekermo A, Repp J, Tsing R. StarCraft II: a new challenge for reinforcement learning. 2017. arXiv:1708.04782."},{"key":"934_CR123","unstructured":"Wang Z, Schaul T, Hessel M, Van Hasselt H, Lanctot M, De Freitas N. Dueling network architectures for deep reinforcement learning. In: 33rd international conference on machine learning, ICML 2016, vol 4. International Machine Learning Society (IMLS); 2016. p. 2939\u201347."},{"key":"934_CR124","doi-asserted-by":"publisher","unstructured":"Wender S, Watson I. Applying reinforcement learning to small scale combat in the real-time strategy game StarCraft:Broodwar. In: 2012 IEEE conference on computational intelligence and games, CIG 2012. 2012. p. 402\u201308. https:\/\/doi.org\/10.1109\/CIG.2012.6374183.","DOI":"10.1109\/CIG.2012.6374183"},{"key":"934_CR125","unstructured":"White A, Modayil J, Sutton RS. Surprise and curiosity for big data robotics. Tech. rep. 2014. www.aaai.org."},{"key":"934_CR126","unstructured":"Wu Y, Tian Y. Training agent for first-person shooter game with actor-critic curriculum learning. Tech. rep. http:\/\/vizdoom.cs.put.edu.pl\/competition-cig-2016\/results."},{"key":"934_CR127","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.ins.2013.08.037","volume":"261","author":"X Xu","year":"2014","unstructured":"Xu X, Zuo L, Huang Z. Reinforcement learning algorithms with function approximation: recent advances and applications. Inf Sci. 2014;261:1\u201331. https:\/\/doi.org\/10.1016\/j.ins.2013.08.037.","journal-title":"Inf Sci."},{"key":"934_CR128","doi-asserted-by":"publisher","first-page":"577","DOI":"10.1016\/j.apenergy.2015.07.050","volume":"156","author":"L Yang","year":"2015","unstructured":"Yang L, Nagy Z, Goffin P, Schlueter A. Reinforcement learning for optimal control of low exergy buildings. Appl Energy. 2015;156:577\u201386. https:\/\/doi.org\/10.1016\/j.apenergy.2015.07.050.","journal-title":"Appl Energy."},{"key":"934_CR129","doi-asserted-by":"publisher","unstructured":"Zheng G, Zhang F, Zheng Z, Xiang Y, Yuan NJ, Xie X, Li Z. DRN: a deep reinforcement learning framework for news recommendation. 2018. https:\/\/doi.org\/10.1145\/3178876.3185994.","DOI":"10.1145\/3178876.3185994"},{"issue":"12","key":"934_CR130","doi-asserted-by":"publisher","first-page":"1337","DOI":"10.1021\/acscentsci.7b00492","volume":"3","author":"Z Zhou","year":"2017","unstructured":"Zhou Z, Li X, Zare RN. Optimizing chemical reactions with deep reinforcement learning. ACS Cent Sci. 2017;3(12):1337\u201344. https:\/\/doi.org\/10.1021\/acscentsci.7b00492.","journal-title":"ACS Cent Sci."},{"key":"934_CR131","doi-asserted-by":"crossref","unstructured":"Zhu X, Goldberg AB. Introduction to semi-supervised learning. Synthesis lectures on artificial intelligence and machine learning, vol 3, no 1. 2009. p. 1\u2013130.","DOI":"10.2200\/S00196ED1V01Y200906AIM006"},{"key":"934_CR132","unstructured":"Zimmer M, Viappiani P, Weng P. Teacher-student framework: a reinforcement learning approach. Tech. rep. 2014. https:\/\/hal.archives-ouvertes.fr\/hal-01215273."}],"container-title":["SN Computer Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42979-021-00934-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s42979-021-00934-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42979-021-00934-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,14]],"date-time":"2023-01-14T04:41:49Z","timestamp":1673671309000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s42979-021-00934-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10,29]]},"references-count":132,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2022,1]]}},"alternative-id":["934"],"URL":"https:\/\/doi.org\/10.1007\/s42979-021-00934-9","relation":{},"ISSN":["2662-995X","2661-8907"],"issn-type":[{"value":"2662-995X","type":"print"},{"value":"2661-8907","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,10,29]]},"assertion":[{"value":"26 May 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 October 2021","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 October 2021","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"28"}}