{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T06:56:58Z","timestamp":1768892218895,"version":"3.49.0"},"reference-count":31,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100008982","name":"National Science Foundation","doi-asserted-by":"publisher","award":["SHF-2048094"],"award-info":[{"award-number":["SHF-2048094"]}],"id":[{"id":"10.13039\/501100008982","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100008982","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CCF-1837131"],"award-info":[{"award-number":["CCF-1837131"]}],"id":[{"id":"10.13039\/501100008982","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100008982","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CNS-1932620"],"award-info":[{"award-number":["CNS-1932620"]}],"id":[{"id":"10.13039\/501100008982","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Toyota R&D"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1109\/lra.2021.3092676","type":"journal-article","created":{"date-parts":[[2021,6,28]],"date-time":"2021-06-28T21:08:21Z","timestamp":1624914501000},"page":"6250-6257","source":"Crossref","is-referenced-by-count":24,"title":["Learning From Demonstrations Using Signal Temporal Logic in Stochastic and Continuous Domains"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0010-9789","authenticated-orcid":false,"given":"Aniruddh G.","family":"Puranic","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4683-5540","authenticated-orcid":false,"given":"Jyotirmoy V.","family":"Deshmukh","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1081-250X","authenticated-orcid":false,"given":"Stefanos","family":"Nikolaidis","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/426"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460811"},{"key":"ref10","article-title":"Learning from demonstrations using signal temporal logic","author":"puranic","year":"0","journal-title":"Proc Conf Robot Learn"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s10703-017-0286-7"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.tcs.2009.06.021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s10703-018-0319-x"},{"key":"ref14","first-page":"92","article-title":"Robust satisfaction of temporal logic over real-valued signals","author":"donz\u00e9","year":"0","journal-title":"Proc Int Conf Formal Model Anal Timed Syst"},{"key":"ref15","first-page":"167","article-title":"Breach, a toolbox for verification and parameter synthesis of hybrid systems","author":"donz\u00e9","year":"0","journal-title":"Proc Int Conf Comput Aided Verification"},{"key":"ref16","article-title":"Openai Gym","author":"brockman","year":"2016","journal-title":"CoRR"},{"key":"ref17","first-page":"2613","article-title":"Double q-learning","author":"hasselt","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref18","article-title":"Modeling purposeful adaptive behavior with the principle of maximum causal entropy","author":"ziebart","year":"2010"},{"key":"ref19","article-title":"Learning from suboptimal demonstration via self-supervised reward regression","author":"chen","year":"2020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-96145-3_38"},{"key":"ref4","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref27","article-title":"Automata guided reinforcement learning with demonstrations","author":"li","year":"2018"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/687"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2020.XVI.004"},{"key":"ref5","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","author":"ziebart","year":"0","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref8","article-title":"Concrete problems in AI safety","author":"amodei","year":"2016"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-100819-063206"},{"key":"ref2","first-page":"1040","article-title":"Learning from demonstration","author":"s","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3054912"},{"key":"ref1","first-page":"12","article-title":"Robot learning from demonstration","author":"atkeson","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref20","first-page":"429","article-title":"Learning from demonstration for shaping through inverse reinforcement learning","author":"suay","year":"0","journal-title":"AAMAS '03 Proc Intl Conf on Autonomous Agents and Multiagent Systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2016.7799279"},{"key":"ref21","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968254"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206234"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2018.8431181"},{"key":"ref25","article-title":"Temporal-logic-based reward shaping for continuing learning tasks","author":"jiang","year":"2020"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/7083369\/9475905\/9465661-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9475905\/09465661.pdf?arnumber=9465661","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:54:09Z","timestamp":1652194449000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9465661\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10]]},"references-count":31,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2021.3092676","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,10]]}}}