{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T12:00:33Z","timestamp":1742990433291,"version":"3.40.3"},"publisher-location":"Cham","reference-count":39,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031579622"},{"type":"electronic","value":"9783031579639"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-57963-9_7","type":"book-chapter","created":{"date-parts":[[2024,4,23]],"date-time":"2024-04-23T07:02:12Z","timestamp":1713855732000},"page":"89-100","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Towards Offline Reinforcement Learning with\u00a0Pessimistic Value Priors"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9824-9378","authenticated-orcid":false,"given":"Filippo","family":"Valdettaro","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0813-7207","authenticated-orcid":false,"given":"A. Aldo","family":"Faisal","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,24]]},"reference":[{"key":"7_CR1","unstructured":"An, G., Moon, S., Kim, J.-H., Song, H.O.: Uncertainty-based offline reinforcement learning with diversified q-ensemble. In: Advances in Neural Information Processing Systems, vol. 
34, pp. 7436\u20137447 (2021)"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Bachtiger, P., et al.: Artificial intelligence, data sensors and interconnectivity: future opportunities for heart failure. Card. Fail. Rev. 6 (2020)","DOI":"10.15420\/cfr.2019.14"},{"key":"7_CR3","unstructured":"Brandfonbrener, D., Whitney, W., Ranganath, R., Bruna, J.: Offline RL without off-policy evaluation. In: Advances in Neural Information Processing Systems, vol. 34, pp. 4933\u20134946 (2021)"},{"key":"7_CR4","unstructured":"Burt, D.R., Ober, S.W., Garriga-Alonso, A., van der Wilk, M.: Understanding variational inference in function-space. arXiv preprint: arXiv:2011.09421 (2020)"},{"key":"7_CR5","unstructured":"Dasari, S., et al.: RoboNet: Large-scale multi-robot learning. In: Conference on Robot Learning, pp. 885\u2013897. PMLR (2020)"},{"key":"7_CR6","unstructured":"Degris, T., White, M., Sutton, R.S.: Off-policy actor-critic. In: Proceedings of the 29th International Conference on Machine Learning, ICML 2012 (2012)"},{"key":"7_CR7","unstructured":"D\u2019Angelo, F., Fortuin, V.: Repulsive deep ensembles are Bayesian. In:\u00a0Ranzato, M.,\u00a0Beygelzimer, A.,\u00a0Dauphin, Y.,\u00a0Liang, P., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems, vol.\u00a034, pp. 3451\u20133465. Curran Associates, Inc. (2021)"},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Engel, Y.,\u00a0Mannor, S.,\u00a0Meir, R.: Reinforcement learning with Gaussian processes. In: Proceedings of the 22nd International Conference on Machine Learning, pp. 201\u2013208 (2005)","DOI":"10.1145\/1102351.1102377"},{"key":"7_CR9","unstructured":"Fujimoto, S., Gu, S.S.: A minimalist approach to offline reinforcement learning. In: Advances in Neural Information Processing Systems, vol. 34, pp. 20132\u201320145 (2021)"},{"key":"7_CR10","unstructured":"Fujimoto, S.,\u00a0Meger, D.,\u00a0Precup, D.: Off-policy deep reinforcement learning without exploration. 
In: International Conference on Machine Learning, pp. 2052\u20132062. PMLR (2019)"},{"key":"7_CR11","unstructured":"Hafner, D.,\u00a0Tran, D.,\u00a0Lillicrap, T.,\u00a0Irpan, A.,\u00a0Davidson, J.: Noise contrastive priors for functional uncertainty. In: Uncertainty in Artificial Intelligence, pp. 905\u2013914. PMLR (2020)"},{"key":"7_CR12","unstructured":"Hensman, J.,\u00a0Fusi, N., Lawrence, N.D.: Gaussian processes for big data. arXiv preprint: arXiv:1309.6835 (2013)"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Huang, Z.,\u00a0Wu, J.,\u00a0Lv, C.: Efficient deep reinforcement learning with imitative expert priors for autonomous driving. IEEE Trans. Neural Netw. Learn. Syst. (2022)","DOI":"10.1109\/TNNLS.2022.3142822"},{"key":"7_CR14","unstructured":"Kalashnikov, D., et\u00a0al.: Scalable deep reinforcement learning for vision-based robotic manipulation. In: Conference on Robot Learning, pp. 651\u2013673. PMLR (2018)"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Kendall, A., et al.: Learning to drive in a day. In: 2019 International Conference on Robotics and Automation (ICRA), pp. 8248\u20138254. IEEE (2019)","DOI":"10.1109\/ICRA.2019.8793742"},{"key":"7_CR16","unstructured":"Kidambi, R., Rajeswaran, A., Netrapalli, P., Joachims, T.: MOReL: model-based offline reinforcement learning. In: Advances in Neural Information Processing Systems, vol. 33, pp. 21810\u201321823 (2020)"},{"key":"7_CR17","unstructured":"Kingma, D.P.,\u00a0Ba, J.: Adam: a method for stochastic optimization. arXiv preprint: arXiv:1412.6980 (2014)"},{"issue":"11","key":"7_CR18","doi-asserted-by":"publisher","first-page":"1716","DOI":"10.1038\/s41591-018-0213-5","volume":"24","author":"M Komorowski","year":"2018","unstructured":"Komorowski, M., Celi, L.A., Badawi, O., Gordon, A.C., Faisal, A.A.: The artificial intelligence clinician learns optimal treatment strategies for sepsis in intensive care. Nat. Med. 24(11), 1716\u20131720 (2018)","journal-title":"Nat. 
Med."},{"key":"7_CR19","unstructured":"Kostrikov, I.,\u00a0Fergus, R.,\u00a0Tompson, J.,\u00a0Nachum, O.: Offline reinforcement learning with fisher divergence critic regularization. In: International Conference on Machine Learning, pp. 5774\u20135783. PMLR (2021)"},{"key":"7_CR20","unstructured":"Kostrikov, I.,\u00a0Nair, A.,\u00a0Levine, S.: Offline reinforcement learning with implicit Q-learning. In: International Conference on Learning Representations (2022)"},{"key":"7_CR21","unstructured":"Kumar, A., Fu, J., Soh, M., Tucker, G., Levine, S.: Stabilizing off-policy Q-learning via bootstrapping error reduction. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"7_CR22","unstructured":"Kumar, A., Zhou, A., Tucker, G., Levine, S.: Conservative q-learning for offline reinforcement learning. In: Advances in Neural Information Processing Systems, vol. 33, pp. 1179\u20131191 (2020)"},{"key":"7_CR23","unstructured":"Lakshminarayanan, B., Pritzel, A., Blundell, C.: Simple and scalable predictive uncertainty estimation using deep ensembles. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"7_CR24","unstructured":"Ma, C., Hern\u00e1ndez-Lobato, J.M.: Functional variational inference based on stochastic process generators. In: Advances in Neural Information Processing Systems, vol. 34, pp. 21795\u201321807 (2021)"},{"key":"7_CR25","unstructured":"Matsushima, T.,\u00a0Furuta, H.,\u00a0Matsuo, Y.,\u00a0Nachum, O.,\u00a0Gu, S.: Deployment-efficient reinforcement learning via model-based offline optimization. In: 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net (2021)"},{"issue":"7540","key":"7_CR26","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih, V., et al.: Human-level control through deep reinforcement learning. 
Nature 518(7540), 529\u2013533 (2015)","journal-title":"Nature"},{"key":"7_CR27","unstructured":"Ober, S.W., Rasmussen, C.E.,\u00a0van\u00a0der Wilk, M.: The promises and pitfalls of deep kernel learning. In: Uncertainty in Artificial Intelligence, pp. 1206\u20131216. PMLR (2021)"},{"key":"7_CR28","unstructured":"Osband, I.,\u00a0Aslanides, J.,\u00a0Cassirer, A.: Randomized prior functions for deep reinforcement learning. In: Advances in Neural Information Processing Systems, vol. 31 (2018)"},{"key":"7_CR29","unstructured":"Ovadia, Y., et al.: Can you trust your model\u2019s uncertainty? evaluating predictive uncertainty under dataset shift. In: Advances in Neural Information Processing Systems, vol. 32, (2019)"},{"key":"7_CR30","volume-title":"Gaussian Processes for Machine Learning","author":"CE Rasmussen","year":"2006","unstructured":"Rasmussen, C.E., et al.: Gaussian Processes for Machine Learning, vol. 1. Springer, Cham (2006)"},{"key":"7_CR31","unstructured":"Shi, L.,\u00a0Li, G.,\u00a0Wei, Y.,\u00a0Chen, Y.,\u00a0Chi, Y.: Pessimistic Q-learning for offline reinforcement learning: towards optimal sample complexity. In: International Conference on Machine Learning, pp. 19967\u201320025. PMLR (2022)"},{"key":"7_CR32","unstructured":"Sinha, S.,\u00a0Mandlekar, A.,\u00a0Garg, A.: S4rl: surprisingly simple self-supervision for offline reinforcement learning in robotics. In: Conference on Robot Learning, pp. 907\u2013917. PMLR (2022)"},{"key":"7_CR33","unstructured":"Sun, S.,\u00a0Zhang, G.,\u00a0Shi, J.,\u00a0Grosse, R.: Functional variational Bayesian neural networks. arXiv preprint: arXiv:1903.05779 (2019)"},{"key":"7_CR34","unstructured":"Titsias, M.: Variational learning of inducing variables in sparse Gaussian processes. In: Artificial Intelligence and Statistics, pp. 567\u2013574. 
PMLR (2009)"},{"key":"7_CR35","unstructured":"Touati, A.,\u00a0Satija, H.,\u00a0Romoff, J.,\u00a0Pineau, J.,\u00a0Vincent, P.: Randomized value functions via multiplicative normalizing flows. In: Uncertainty in Artificial Intelligence, pp. 422\u2013432. PMLR (2020)"},{"key":"7_CR36","unstructured":"Van Amersfoort, J., Smith, L., Jesson, A., Key, O., Gal, Y.: On feature collapse and deep kernel learning for single forward pass uncertainty. arXiv preprint: arXiv:2102.11409 (2021)"},{"key":"7_CR37","unstructured":"Wilson, A.G.,\u00a0Hu, Z.,\u00a0Salakhutdinov, R., Xing, E.P.: Deep kernel learning. In: Artificial Intelligence and Statistics, pp. 370\u2013378. PMLR (2016)"},{"key":"7_CR38","doi-asserted-by":"crossref","unstructured":"Xiao, T., Wang, D.: A general offline reinforcement learning framework for interactive recommendation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 4512\u20134520 (2021)","DOI":"10.1609\/aaai.v35i5.16579"},{"key":"7_CR39","unstructured":"Yu, T., et al.: MOPO: model-based offline policy optimization. In: Advances in Neural Information Processing Systems, vol. 33, pp. 
14129\u201314142 (2020)"}],"container-title":["Lecture Notes in Computer Science","Epistemic Uncertainty in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-57963-9_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,23]],"date-time":"2024-04-23T07:03:40Z","timestamp":1713855820000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-57963-9_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031579622","9783031579639"],"references-count":39,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-57963-9_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"24 April 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"Epi UAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Epistemic Uncertainty in Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pittsburgh, PA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 August 2023","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"epiuai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/epi-workshop-uai-2023\/home","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}