{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,24]],"date-time":"2025-10-24T08:23:28Z","timestamp":1761294208676,"version":"3.40.3"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030598532"},{"type":"electronic","value":"9783030598549"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-59854-9_3","type":"book-chapter","created":{"date-parts":[[2020,11,2]],"date-time":"2020-11-02T23:02:42Z","timestamp":1604358162000},"page":"18-26","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["$$\\mathsf {SafePILCO}$$: A Software Tool for Safe and Data-Efficient Policy Synthesis"],"prefix":"10.1007","author":[{"given":"Kyriakos","family":"Polymenakos","sequence":"first","affiliation":[]},{"given":"Nikitas","family":"Rontsis","sequence":"additional","affiliation":[]},{"given":"Alessandro","family":"Abate","sequence":"additional","affiliation":[]},{"given":"Stephen","family":"Roberts","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,3]]},"reference":[{"key":"3_CR1","unstructured":"Abadi, M., et al.: TensorFlow: Large-scale machine learning on heterogeneous systems (2015). https:\/\/www.tensorflow.org\/. software available from tensorflow.org"},{"key":"3_CR2","unstructured":"Berkenkamp, F., Krause, A., Schoellig, A.P.: Bayesian optimization with safety constraints: safe and automatic parameter tuning in robotics. CoRR abs\/1602.04450 (2016). http:\/\/arxiv.org\/abs\/1602.04450"},{"key":"3_CR3","unstructured":"Brockman, G., et al.: OpenAI Gym. arXiv preprint arXiv:1606.01540 (2016)"},{"key":"3_CR4","doi-asserted-by":"crossref","unstructured":"Cauchi, N., Abate, A.: Benchmarks for cyber-physical systems: a modular model library for building automation systems. In: Proceedings of ADHS, pp. 49\u201354 (2018)","DOI":"10.1016\/j.ifacol.2018.08.009"},{"key":"3_CR5","doi-asserted-by":"crossref","unstructured":"Chatzilygeroudis, K., Rama, R., Kaushik, R., Goepp, D., Vassiliades, V., Mouret, J.B.: Black-box data-efficient policy search for robotics. In: 2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 51\u201358. IEEE (2017)","DOI":"10.1109\/IROS.2017.8202137"},{"key":"3_CR6","unstructured":"Chua, K., Calandra, R., McAllister, R., Levine, S.: Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In: Advances in Neural Information Processing Systems, pp. 4754\u20134765 (2018)"},{"key":"3_CR7","unstructured":"Deisenroth, M.P.: Efficient reinforcement learning using Gaussian processes. Ph.D. thesis, Karlsruhe Institute of Technology (2010)"},{"key":"3_CR8","doi-asserted-by":"crossref","unstructured":"Deisenroth, M.P., Englert, P., Peters, J., Fox, D.: Multi-task policy search for robotics. In: 2014 IEEE International Conference on Robotics and Automation (ICRA), pp. 3876\u20133881. IEEE (2014)","DOI":"10.1109\/ICRA.2014.6907421"},{"key":"3_CR9","doi-asserted-by":"crossref","unstructured":"Deisenroth, M.P., Neumann, G., Peters, J., et al.: A survey on policy search for robotics. Found. Trends$$\\textregistered $$ Robot. 2(1\u20132), 1\u2013142 (2013)","DOI":"10.1561\/2300000021"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Deisenroth, M.P., Rasmussen, C.E., Fox, D.: Learning to control a low-cost manipulator using data-efficient reinforcement learning. In: Robotics: Science and Systems (2011)","DOI":"10.15607\/RSS.2011.VII.008"},{"key":"3_CR11","unstructured":"Deisenroth, M.P., Rasmussen, C.E.: PILCO: a model-based and data-efficient approach to policy search. In: In Proceedings of the International Conference on Machine Learning (2011)"},{"key":"3_CR12","unstructured":"Duan, Y., Chen, X., Houthooft, R., Schulman, J., Abbeel, P.: Benchmarking deep reinforcement learning for continuous control. In: International Conference on Machine Learning (ICML), pp. 1329\u20131338 (2016)"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Duivenvoorden, R.R., Berkenkamp, F., Carion, N., Krause, A., Schoellig, A.P.: Constrained Bayesian optimization with particle swarms for safe adaptive controller tuning. In: Proceedings of the IFAC (International Federation of Automatic Control) World Congress, pp. 12306\u201312313 (2017)","DOI":"10.1016\/j.ifacol.2017.08.1991"},{"key":"3_CR14","unstructured":"Haarnoja, T., Zhou, A., Abbeel, P., Levine, S.: Soft actor-critic: off-policy maximum entropy deep reinforcement learning with a stochastic actor. arXiv preprint arXiv:1801.01290 (2018)"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Koller, T., Berkenkamp, F., Turchetta, M., Krause, A.: Learning-based model predictive control for safe exploration. In: 2018 IEEE Conference on Decision and Control (CDC), pp. 6059\u20136066. IEEE (2018)","DOI":"10.1109\/CDC.2018.8619572"},{"issue":"1","key":"3_CR16","first-page":"1334","volume":"17","author":"S Levine","year":"2016","unstructured":"Levine, S., Finn, C., Darrell, T., Abbeel, P.: End-to-end training of deep visuomotor policies. J. Mach. Learn. Res. 17(1), 1334\u20131373 (2016)","journal-title":"J. Mach. Learn. Res."},{"key":"3_CR17","doi-asserted-by":"crossref","unstructured":"Mataric, M.J.: Reward functions for accelerated learning. In: Machine Learning Proceedings 1994, pp. 181\u2013189. Elsevier (1994)","DOI":"10.1016\/B978-1-55860-335-6.50030-1"},{"key":"3_CR18","unstructured":"Matthews, A.G.d.G., et al.: GPflow: a Gaussian process library using TensorFlow. J. Mach. Learn. Res. 18(40), 1\u20136 (2017). http:\/\/jmlr.org\/papers\/v18\/16-537.html"},{"issue":"7540","key":"3_CR19","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih, V., et al.: Human-level control through deep reinforcement learning. Nature 518(7540), 529\u2013533 (2015)","journal-title":"Nature"},{"key":"3_CR20","unstructured":"Ng, A.Y., Jordan, M.I.: Shaping and policy search in reinforcement learning. Ph.D. thesis, University of California, Berkeley Berkeley (2003)"},{"key":"3_CR21","unstructured":"Polymenakos, K., Abate, A., Roberts, S.: Safe policy search using Gaussian process models. In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1565\u20131573. International Foundation for Autonomous Agents and Multiagent Systems (2019)"},{"key":"3_CR22","volume-title":"Gaussian Processes for Machine Learning","author":"CE Rasmussen","year":"2006","unstructured":"Rasmussen, C.E., Williams, C.K.I.: Gaussian Processes for Machine Learning. MIT Press, Cambridge (2006)"},{"key":"3_CR23","unstructured":"Sui, Y., Gotovos, A., Burdick, J., Krause, A.: Safe exploration for optimization with Gaussian processes. In: Proceedings of The 32nd International Conference on Machine Learning, pp. 997\u20131005 (2015)"},{"key":"3_CR24","doi-asserted-by":"publisher","first-page":"164","DOI":"10.1109\/TPAMI.2018.2879335","volume":"42","author":"J Vinogradska","year":"2018","unstructured":"Vinogradska, J., Bischoff, B., Achterhold, J., Koller, T., Peters, J.: Numerical quadrature for probabilistic policy search. IEEE Trans. Pattern Anal. Mach. Intell. 42, 164\u2013175 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3_CR25","unstructured":"Vinogradska, J., Bischoff, B., Nguyen-Tuong, D., Romer, A., Schmidt, H., Peters, J.: Stability of controllers for gaussian process forward models. In: Proceedings of The 33rd International Conference on Machine Learning, pp. 545\u2013554 (2016)"},{"key":"3_CR26","unstructured":"Vuong, T.L., Tran, K.: Uncertainty-aware model-based policy optimization. arXiv preprint arXiv:1906.10717 (2019)"},{"key":"3_CR27","unstructured":"Wang, T., et al.: Benchmarking model-based reinforcement learning (2019)"}],"container-title":["Lecture Notes in Computer Science","Quantitative Evaluation of Systems"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-59854-9_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,12,17]],"date-time":"2020-12-17T16:10:09Z","timestamp":1608221409000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-59854-9_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030598532","9783030598549"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-59854-9_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"3 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"QEST","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Quantitative Evaluation of Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vienna","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Austria","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3 September 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"qest2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.qest.org\/qest2020\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"42","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"12","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"29% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3,10","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4,06","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}