{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T06:10:09Z","timestamp":1774937409431,"version":"3.50.1"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CNS 1837515"],"award-info":[{"award-number":["CNS 1837515"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Automat. Sci. Eng."],"published-print":{"date-parts":[[2021,7]]},"DOI":"10.1109\/tase.2020.3043636","type":"journal-article","created":{"date-parts":[[2021,1,5]],"date-time":"2021-01-05T22:59:30Z","timestamp":1609887570000},"page":"917-931","source":"Crossref","is-referenced-by-count":25,"title":["An Ergodic Measure for Active Learning From Equilibrium"],"prefix":"10.1109","volume":"18","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0299-1760","authenticated-orcid":false,"given":"Ian","family":"Abraham","sequence":"first","affiliation":[]},{"given":"Ahalya","family":"Prabhakar","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2262-8176","authenticated-orcid":false,"given":"Todd D.","family":"Murphey","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2019.2923880"},{"key":"ref38","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/0362-546X(83)90049-4"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2014.03.001"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4471-0807-8_40"},{"key":"ref30","author":"kullback","year":"1997","journal-title":"Information Theory and Statistics"},{"key":"ref37","author":"bishop","year":"2006","journal-title":"Pattern Recognition and Machine Learning"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CCA.2001.973983"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/192115.192132"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.29007\/k34m"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-016-9613-x"},{"key":"ref40","article-title":"Online feedback control for input-saturated robotic systems on lie groups","author":"fan","year":"2017","journal-title":"arXiv 1709 00376"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989041"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.010"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989186"},{"key":"ref14","first-page":"908","article-title":"Safe model-based reinforcement learning with stability guarantees","author":"berkenkamp","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CCTA.2017.8062715"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IRDS.2002.1041446"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/BF01834206"},{"key":"ref18","first-page":"417","article-title":"Off-policy temporal-difference learning with function approximation","author":"precup","year":"2001","journal-title":"Proc ICML"},{"key":"ref19","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2013.6580484"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206410"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.physd.2010.10.010"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2017.2766265"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.045"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2017.2654542"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2014.01.002"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2849588"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2015.2500441"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2010.5649089"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s10957-007-9305-y"},{"key":"ref46","article-title":"Risk-aware active inverse reinforcement learning","author":"brown","year":"2019","journal-title":"arXiv 1901 02161"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref45","article-title":"Batch active preference-based learning of reward functions","author":"biyik","year":"2018","journal-title":"arXiv 1810 04303"},{"key":"ref22","first-page":"2951","article-title":"Practical Bayesian optimization of machine learning algorithms","author":"snoek","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref47","author":"anderson","year":"2007","journal-title":"Optimal Control Linear Quadratic Methods"},{"key":"ref21","article-title":"A tutorial on Bayesian optimization","author":"frazier","year":"2018","journal-title":"arXiv 1807 02811"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2016.2594147"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-29363-9_26"},{"key":"ref41","first-page":"34","article-title":"Improving PILCO with Bayesian neural network dynamics models","volume":"4","author":"gal","year":"2016","journal-title":"Proc Data-Efficient Mach Learn Workshop (ICML)"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s10472-015-9463-9"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487277"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1201\/9780203026786"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRev.36.823"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2010.06.033"}],"container-title":["IEEE Transactions on Automation Science and Engineering"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/8856\/9470960\/9312988-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8856\/9470960\/09312988.pdf?arnumber=9312988","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:52:31Z","timestamp":1652194351000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9312988\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7]]},"references-count":47,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tase.2020.3043636","relation":{},"ISSN":["1545-5955","1558-3783"],"issn-type":[{"value":"1545-5955","type":"print"},{"value":"1558-3783","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,7]]}}}