{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T08:47:25Z","timestamp":1743065245523,"version":"3.40.3"},"publisher-location":"Cham","reference-count":57,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031779404"},{"type":"electronic","value":"9783031779411"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-77941-1_12","type":"book-chapter","created":{"date-parts":[[2025,1,27]],"date-time":"2025-01-27T11:39:30Z","timestamp":1737977970000},"page":"154-170","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Approaching Single-Episode Survival Reinforcement Learning with Safety-Threshold Q-Learning"],"prefix":"10.1007","author":[{"given":"Filipo Studzinski","family":"Perotto","sequence":"first","affiliation":[]},{"given":"Melvine","family":"Nargeot","sequence":"additional","affiliation":[]},{"given":"Aymane","family":"Ouahbi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,27]]},"reference":[{"key":"12_CR1","unstructured":"Achiam, J., Held, D., Tamar, A., Abbeel, P.: Constrained policy optimization. In: Proceedingss of the $$34^\\text{th}$$ ICML, pp. 22\u201331. PMLR (2017)"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Alshiekh, M., Bloem, R., Ehlers, R., K\u00f6nighofer, B., Niekum, S., Topcu, U.: Safe reinforcement learning via shielding. In: Proceedings of $$32^\\text{ nd }$$ AAAI, pp. 2669\u20132678 (2018)","DOI":"10.1609\/aaai.v32i1.11797"},{"key":"12_CR3","unstructured":"Altman, E.: Constrained Markov Decision Processes. Chapman & Hall (1999)"},{"issue":"2\u20133","key":"12_CR4","doi-asserted-by":"publisher","first-page":"235","DOI":"10.1023\/A:1013689704352","volume":"47","author":"P Auer","year":"2002","unstructured":"Auer, P., Cesa-Bianchi, N., Fischer, P.: Finite-time analysis of the multiarmed bandit problem. Mach. Learn. 47(2\u20133), 235\u2013256 (2002)","journal-title":"Mach. Learn."},{"key":"12_CR5","doi-asserted-by":"crossref","unstructured":"Auer, P., Ortner, R.: Logarithmic online regret bounds for undiscounted reinforcement learning. In: Advances in Neural Information Processing Systems 19, Proceedings of the $$20^\\text{ th }$$ NeurIPS (2006), pp. 49\u201356. MIT Press (2007)","DOI":"10.7551\/mitpress\/7503.003.0011"},{"issue":"8","key":"12_CR6","doi-asserted-by":"publisher","first-page":"716","DOI":"10.1073\/pnas.38.8.716","volume":"38","author":"R Bellman","year":"1952","unstructured":"Bellman, R.: On the theory of dynamic programming. Proc. Natl. Acad. Sci. U.S.A. 38(8), 716\u2013719 (1952)","journal-title":"Proc. Natl. Acad. Sci. U.S.A."},{"key":"12_CR7","unstructured":"Berkenkamp, F., Turchetta, M., Schoellig, A., Krause, A.: Safe model-based reinforcement learning with stability guarantees. 
In: Proceedings of the $$30^\\text{ th }$$ NeurIPS (2016), pp. 908\u2013918. Curran (2017)"},{"key":"12_CR8","unstructured":"Bertsekas, D.: Reinforcement Learning and Optimal Control. Athena (2019)"},{"key":"12_CR9","unstructured":"Boutilier, C., Lu, T.: Budget allocation using weakly coupled, constrained Markov decision processes. In: Proceedings of $$32^\\text{ nd }$$ UAI, pp. 52\u201361. AUAI Press (2016)"},{"key":"12_CR10","first-page":"213","volume":"3","author":"R Brafman","year":"2002","unstructured":"Brafman, R., Tennenholtz, M.: R-max - a general polynomial time algorithm for near-optimal reinforcement learning. J. Mach. Learn. Res. 3, 213\u2013231 (2002)","journal-title":"J. Mach. Learn. Res."},{"issue":"10","key":"12_CR11","doi-asserted-by":"publisher","first-page":"2813","DOI":"10.1109\/TAC.2014.2314211","volume":"59","author":"C Caramanis","year":"2014","unstructured":"Caramanis, C., Dimitrov, N., Morton, D.: Efficient algorithms for budget-constrained Markov decision processes. IEEE Trans. Automat. Contr. 59(10), 2813\u20132817 (2014)","journal-title":"IEEE Trans. Automat. Contr."},{"key":"12_CR12","doi-asserted-by":"crossref","unstructured":"Carpin, S., Chow, Y., Pavone, M.: Risk aversion in finite Markov decision processes using total cost criteria and average value at risk. In: Proceedings of ICRA, pp. 335\u2013342. IEEE (2016)","DOI":"10.1109\/ICRA.2016.7487152"},{"key":"12_CR13","unstructured":"Chen, A.S., Sharma, A., Levine, S., Finn, C.: You only live once: Single-life reinforcement learning via learned reward shaping. In: Decision Awareness in Reinforcement Learning Workshop at ICML 2022 (2022)"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Cheng, R., Orosz, G., Murray, R., Burdick, J.: End-to-end safe reinforcement learning through barrier functions for safety-critical continuous control tasks. In: Proceedings of $$33^\\text{ rd }$$ AAAI, pp. 3387\u20133395 (2019)","DOI":"10.1609\/aaai.v33i01.33013387"},{"issue":"167","key":"12_CR15","first-page":"1","volume":"18","author":"Y Chow","year":"2018","unstructured":"Chow, Y., Ghavamzadeh, M., Janson, L., Pavone, M.: Risk-constrained reinforcement learning with percentile risk criteria. JMLR 18(167), 1\u201351 (2018)","journal-title":"JMLR"},{"key":"12_CR16","unstructured":"Chow, Y., Nachum, O., Duenez-Guzman, E., Ghavamzadeh, M.: A Lyapunov-based approach to safe reinforcement learning. In: Proceedings of NeurIPS, vol. 31, pp. 8103\u20138112. Curran (2018)"},{"key":"12_CR17","unstructured":"Du, Y., Wang, S., Huang, L.: Provably efficient risk-sensitive reinforcement learning: Iterated CVaR and worst path. In: The $$11^\\text{ th }$$ Int. Conference on Learning Representations, ICLR 2023 (2023)"},{"issue":"9","key":"12_CR18","doi-asserted-by":"publisher","first-page":"2419","DOI":"10.1007\/s10994-021-05961-4","volume":"110","author":"G Dulac-Arnold","year":"2021","unstructured":"Dulac-Arnold, G., Levine, N., Mankowitz, D.J., Li, J., Paduraru, C., Gowal, S., Hester, T.: Challenges of real-world reinforcement learning: definitions, benchmarks and analysis. Mach. Learn. 110(9), 2419\u20132468 (2021). https:\/\/doi.org\/10.1007\/s10994-021-05961-4","journal-title":"Mach. Learn."},{"key":"12_CR19","unstructured":"Efroni, Y., Mannor, S., Pirotta, M.: Exploration-exploitation in constrained MDPs. 
arXiv:2003.02189 (2020)"},{"issue":"3\u20134","key":"12_CR20","doi-asserted-by":"publisher","first-page":"219","DOI":"10.1561\/2200000071","volume":"11","author":"V Fran\u00e7ois-Lavet","year":"2018","unstructured":"Fran\u00e7ois-Lavet, V., Henderson, P., Islam, R., Bellemare, M.G., Pineau, J.: An introduction to deep reinforcement learning. Found. Trends Mach. Learn. 11(3\u20134), 219\u2013354 (2018)","journal-title":"Found. Trends Mach. Learn."},{"key":"12_CR21","doi-asserted-by":"crossref","unstructured":"Fulton, N., Platzer, A.: Safe reinforcement learning via formal methods: Toward safe control through proof and learning. In: Proceedings of the AAAI Conference on Artificial Intelligence (2018)","DOI":"10.1609\/aaai.v32i1.12107"},{"key":"12_CR22","unstructured":"Garcelon, E., Ghavamzadeh, M., Lazaric, A., Pirotta, M.: Conservative exploration in reinforcement learning. In: Proceedings of the $$23^\\text{ rd }$$ International Conference on Artificial Intelligence and Statistics (AISTATS). Proceedings of Machine Learning Research, vol.\u00a0108, pp. 1431\u20131441. PMLR (2020)"},{"key":"12_CR23","doi-asserted-by":"crossref","unstructured":"Garc\u00eda, J., Shafie, D.: Teaching a humanoid robot to walk faster through safe reinforcement learning. Engineering Applications of Artif. Intel. 88 (2020)","DOI":"10.1016\/j.engappai.2019.103360"},{"key":"12_CR24","first-page":"1437","volume":"16","author":"J Garc\u00eda","year":"2015","unstructured":"Garc\u00eda, J., Fern\u00e1ndez, F.: A comprehensive survey on safe reinforcement learning. JMLR 16, 1437\u20131480 (2015)","journal-title":"JMLR"},{"key":"12_CR25","unstructured":"Gu, S., Yang, L., Du, Y., Chen, G., Walter, F., Wang, J., Yang, Y., Knoll, A.: A review of safe reinforcement learning: Methods, theory and applications (2023)"},{"key":"12_CR26","volume-title":"Dynamic Programming and Markov Processes","author":"R Howard","year":"1960","unstructured":"Howard, R.: Dynamic Programming and Markov Processes. MIT Press, Cambridge, MA (1960)"},{"key":"12_CR27","unstructured":"Jansen, N., K\u00f6nighofer, B., Junges, J., Serban, A., Bloem, R.: Safe reinforcement learning using probabilistic shields. In: $$31^\\text{ st }$$ International Conference on Concurrency Theory (CONCUR 2020). LIPIcs: Schloss Dagstuhl (2020)"},{"key":"12_CR28","unstructured":"Kaiser, L., Babaeizadeh, M., et al.: Model based reinforcement learning for Atari. In: $$8^\\text{ th }$$ International Conference on Learning Representations, ICLR 2020. OpenReview.net (2020)"},{"issue":"2\u20133","key":"12_CR29","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1023\/A:1017984413808","volume":"49","author":"M Kearns","year":"2002","unstructured":"Kearns, M., Singh, S.: Near-optimal reinforcement learning in polynomial time. Mach. Learn. 49(2\u20133), 209\u2013232 (2002)","journal-title":"Mach. Learn."},{"key":"12_CR30","unstructured":"Lam, T., Verma, A., Low, B.K.H., Jaillet, P.: Risk-aware reinforcement learning with coherent risk measures and non-linear function approximation. In: The $$11^\\text{ th }$$ Int. Conference on Learning Representations, ICLR 2023. OpenReview.net (2023)"},{"key":"12_CR31","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y., Hinton, G.: Deep learning. 
Nature 521, 436\u2013444 (2015)","journal-title":"Nature"},{"key":"12_CR32","doi-asserted-by":"crossref","unstructured":"Liu, D., Wei, Q., Wang, D., Yang, X., Li, H.: Overview of Adaptive Dynamic Programming, pp. 1\u201333. Springer, Cham (2017)","DOI":"10.1007\/978-3-319-50815-3_1"},{"key":"12_CR33","unstructured":"Liu, Z., Guo, Z., Cen, Z., Zhang, H., Tan, J., Li, B., Zhao, D.: On the robustness of safe reinforcement learning under observational perturbations. In: The $$11^\\text{ th }$$ International Conference on Learning Representations, ICLR 2023. OpenReview.net (2023)"},{"key":"12_CR34","doi-asserted-by":"crossref","unstructured":"L\u00fctjens, B., Everett, M., How, J.P.: Safe reinforcement learning with model uncertainty estimates. In: 2019 International Conference on Robotics and Automation (ICRA), pp. 8662\u20138668. IEEE (2019)","DOI":"10.1109\/ICRA.2019.8793611"},{"key":"12_CR35","doi-asserted-by":"publisher","first-page":"75","DOI":"10.1007\/978-3-030-28619-4_10","volume":"10","author":"A Majumdar","year":"2020","unstructured":"Majumdar, A., Pavone, M.: How should a robot assess risk? Towards an axiomatic theory of risk in robotics. Robot. Res. 10, 75\u201384 (2020)","journal-title":"Robot. Res."},{"issue":"2","key":"12_CR36","doi-asserted-by":"publisher","first-page":"117","DOI":"10.1023\/A:1007541107674","volume":"35","author":"N Meuleau","year":"1999","unstructured":"Meuleau, N., Bourgine, P.: Exploration of multi-state environments: local measures and back-propagation of uncertainty. Mach. Learn. 35(2), 117\u2013154 (1999)","journal-title":"Mach. Learn."},{"key":"12_CR37","unstructured":"Miryoosefi, S., Brantley, K., Daume, H., Dudik, M., Schapire, R.: Reinforcement learning with convex constraints. In: Proceedings of NeurIPS, vol. 32, pp. 14093\u201314102. Curran (2019)"},{"key":"12_CR38","unstructured":"Miryoosefi, S., Jin, C.: A simple reward-free approach to constrained reinforcement learning. In: International Conference on Machine Learning (ICML), vol.\u00a0162, pp. 15666\u201315698 (2022)"},{"key":"12_CR39","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih, V., Kavukcuoglu, K., Silver, D., Graves, A., Antonoglou, I., Wierstra, D., Riedmiller, M.: Human-level control through deep reinforcement learning. Nature 518, 529\u2013533 (2015)","journal-title":"Nature"},{"key":"12_CR40","unstructured":"Mnih, V., et al.: Playing Atari with deep reinforcement learning. CoRR arXiv:1312.5602 (2013)"},{"key":"12_CR41","doi-asserted-by":"crossref","unstructured":"Perotto, F.S., Pucel, X., Farges, J.: Time is budget: A heuristic for reducing the risk of ruin in multi-armed gambler bandits. In: Bramer, M., Stahl, F. (eds.) Artificial Intelligence XXXIX - 42nd SGAI International Conference on Artificial Intelligence, AI 2022, Proceedings. LNCS, vol. 13652, pp. 346\u2013352. Springer (2022)","DOI":"10.1007\/978-3-031-21441-7_29"},{"key":"12_CR42","unstructured":"Perotto, F.S., Vakili, S., Gajane, P., Faghan, Y., Bourgais, M.: Gambler bandits and the regret of being ruined. In: Dignum, F., Lomuscio, A., Endriss, U., Now\u00e9, A. (eds.) AAMAS \u201921: $$20^\\text{ th }$$ International Conference on Autonomous Agents and Multiagent Systems, pp. 1664\u20131667. ACM (2021)"},{"key":"12_CR43","unstructured":"Perotto, F., Bourgais, M., Silva, B., Vercouter, L.: Open problem: Risk of ruin in multiarmed bandits. In: Proceedings of COLT, pp. 
3194\u20133197 (2019)"},{"issue":"2","key":"12_CR44","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1007\/s10846-017-0468-y","volume":"86","author":"AS Polydoros","year":"2017","unstructured":"Polydoros, A.S., Nalpantidis, L.: Survey of model-based reinforcement learning: Applications on robotics. J. Intell. Robotic Syst. 86(2), 153\u2013173 (2017)","journal-title":"J. Intell. Robotic Syst."},{"key":"12_CR45","doi-asserted-by":"crossref","unstructured":"Poupart, P., Vlassis, N., Hoey, J., Regan, K.: An analytic solution to discrete Bayesian reinforcement learning. In: Proceedings of the 23rd ICML, pp. 697\u2013704. ACM (2006)","DOI":"10.1145\/1143844.1143932"},{"key":"12_CR46","doi-asserted-by":"crossref","unstructured":"Puterman, M., Patrick, J.: Dynamic programming. In: Encyclopedia of Machine Learning, pp. 298\u2013308. Springer (2010)","DOI":"10.1007\/978-0-387-30164-8_237"},{"key":"12_CR47","doi-asserted-by":"crossref","unstructured":"Song, Y., Steinweg, M., Kaufmann, E., Scaramuzza, D.: Autonomous drone racing with deep reinforcement learning. In: IEEE\/RSJ International Conference on Intelligent Robots and Systems, IROS, 2021, pp. 1205\u20131212. IEEE (2021)","DOI":"10.1109\/IROS51168.2021.9636053"},{"key":"12_CR48","unstructured":"Sutton, R.S., Barto, A.G.: Reinforcement Learning: An Introduction. MIT Press, 2nd edn. (2018)"},{"key":"12_CR49","first-page":"13859","volume":"34","author":"G Thomas","year":"2021","unstructured":"Thomas, G., Luo, Y., Ma, T.: Safe reinforcement learning by imagining the near future. Adv. Neural. Inf. Process. Syst. 34, 13859\u201313869 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"12_CR50","first-page":"12151","volume":"33","author":"M Turchetta","year":"2020","unstructured":"Turchetta, M., Kolobov, A., Shah, S., Krause, A., Agarwal, A.: Safe reinforcement learning via curriculum induction. Adv. Neural. Inf. Process. Syst. 33, 12151\u201312162 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"12_CR51","doi-asserted-by":"crossref","unstructured":"Valencia, D., et al.: Comparison of model-based and model-free reinforcement learning for real-world dexterous robotic manipulation tasks. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 871\u2013878 (2023)","DOI":"10.1109\/ICRA48891.2023.10160983"},{"key":"12_CR52","unstructured":"Wachi, A., Sui, Y.: Safe reinforcement learning in constrained Markov decision processes. In: International Conference on Machine Learning, pp. 9797\u20139806. PMLR (2020)"},{"key":"12_CR53","unstructured":"Wagener, N.C., Boots, B., Cheng, C.A.: Safe reinforcement learning using advantage-based intervention. In: International Conference on Machine Learning, pp. 10630\u201310640. PMLR (2021)"},{"issue":"3","key":"12_CR54","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1007\/BF00992698","volume":"8","author":"CJCH Watkins","year":"1992","unstructured":"Watkins, C.J.C.H., Dayan, P.: Q-learning. Mach. Learn. 8(3), 279\u2013292 (1992)","journal-title":"Mach. Learn."},{"key":"12_CR55","doi-asserted-by":"crossref","unstructured":"Wiering, M., Otterlo, M.: Reinforcement learning and Markov decision processes. In: Reinforcement Learning: State-of-the-Art, pp. 3\u201342. Springer (2012)","DOI":"10.1007\/978-3-642-27645-3_1"},{"key":"12_CR56","doi-asserted-by":"crossref","unstructured":"Wu, D., et al.: Budget constrained bidding by model-free reinforcement learning in display advertising. In: Proceedings of $$27^\\text{ th }$$ CIKM, pp. 1443\u20131451. 
ACM (2018)","DOI":"10.1145\/3269206.3271748"},{"key":"12_CR57","doi-asserted-by":"crossref","unstructured":"Yang, Y., Vamvoudakis, K.G., Modares, H.: Safe reinforcement learning for dynamical games. Int. J. Robust Nonlinear Contr. 30(9), 3706\u20133726 (2020)","DOI":"10.1002\/rnc.4962"}],"container-title":["Communications in Computer and Information Science","Optimization and Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-77941-1_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,27]],"date-time":"2025-01-27T11:39:53Z","timestamp":1737977993000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-77941-1_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031779404","9783031779411"],"references-count":57,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-77941-1_12","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"27 January 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"OLA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Optimization and Learning","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Dubrovnik","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Croatia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 May 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 May 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ola2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ola2024.sciencesconf.org","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}