{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,15]],"date-time":"2025-10-15T10:38:22Z","timestamp":1760524702367,"version":"3.32.0"},"reference-count":45,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,10,14]]},"DOI":"10.1109\/iros58592.2024.10801924","type":"proceedings-article","created":{"date-parts":[[2024,12,25]],"date-time":"2024-12-25T19:17:39Z","timestamp":1735154259000},"page":"11147-11154","source":"Crossref","is-referenced-by-count":2,"title":["Signal Temporal Logic-Guided Apprenticeship Learning"],"prefix":"10.1109","author":[{"given":"Aniruddh G.","family":"Puranic","sequence":"first","affiliation":[{"name":"University of Southern California,Department of Computer Science,USA"}]},{"given":"Jyotirmoy V.","family":"Deshmukh","sequence":"additional","affiliation":[{"name":"University of Southern California,Department of Computer Science,USA"}]},{"given":"Stefanos","family":"Nikolaidis","sequence":"additional","affiliation":[{"name":"University of Southern California,Department of Computer Science,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/687"},{"key":"ref3","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","volume-title":"ICML","author":"Ng"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref5"},{"article-title":"Maximum entropy inverse reinforcement learning","volume-title":"AAAI","author":"Ziebart","key":"ref6"},{"key":"ref7","article-title":"Modeling purposeful adaptive behavior with the principle of maximum causal entropy","volume-title":"Ph.D. dissertation","author":"Ziebart","year":"2010"},{"key":"ref8","first-page":"457","article-title":"Score-based inverse reinforcement learning","volume-title":"AAMAS","author":"Asri"},{"article-title":"Learning from suboptimal demonstration via self-supervised reward regression","volume-title":"CoRL","author":"Chen","key":"ref9"},{"article-title":"Better-than-demonstrator imitation learning via automatically-ranked demonstrations","volume-title":"CoRL","author":"Brown","key":"ref10"},{"article-title":"On the expressivity of markov reward","volume-title":"NeurIPS","author":"Abel","key":"ref11"},{"article-title":"Rational multi-objective agents must admit non-markov reward representations","volume-title":"NeurIPS ML Safety Workshop","author":"Pitis","key":"ref12"},{"article-title":"Expressing non-markov reward to a markov agent","volume-title":"RLDM","author":"Abe","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/840"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12440"},{"article-title":"Learning from demonstrations using signal temporal logic","volume-title":"CoRL","author":"Puranic","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3092676"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3226072"},{"article-title":"Learning robust rewards with adversarial inverse reinforcement learning","volume-title":"ICLR","author":"Fu","key":"ref19"},{"article-title":"Generative adversarial imitation from observation","year":"2019","author":"Torabi","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-96145-3_38"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460811"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1809.06305"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CDC42340.2020.9304190"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/426"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30206-3_12"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15297-9_9"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.tcs.2009.06.021"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CDC40024.2019.9029429"},{"article-title":"Reproducibility of benchmarked deep reinforcement learning tasks for continuous control","volume-title":"Reproducibility in Machine Learning Workshop (ICML)","author":"Islam","key":"ref30"},{"key":"ref31","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"ICML","author":"Ng"},{"article-title":"Signal temporal logic-guided apprenticeship learning - supplemental document","year":"2024","author":"Puranic","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-59152-6_34"},{"article-title":"panda-gym: Open-Source Goal-Conditioned Environments for Robotic Learning","volume-title":"NeurIPS Workshop","author":"Gallou\u00e9dec","key":"ref34"},{"article-title":"Controlling overestimation bias with truncated mixture of continuous distributional quantile critics","volume-title":"ICML","author":"Kuznetsov","key":"ref35"},{"key":"ref36","article-title":"Hindsight experience replay","volume-title":"NeurIPS","volume":"30","author":"Andrychowicz"},{"article-title":"Monte carlo augmented actor-critic for sparse reward deep reinforcement learning from suboptimal demonstrations","volume-title":"NeurIPS","author":"Wilcox","key":"ref37"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811660"},{"article-title":"Goal-conditioned imitation learning","volume-title":"NeurIPS","author":"Ding","key":"ref39"},{"article-title":"Watch and match: Supercharging imitation with regularized optimal transport","volume-title":"CoRL","author":"Haldar","key":"ref40"},{"article-title":"robosuite: A modular simulation framework and benchmark for robot learning","year":"2020","author":"Zhu","key":"ref41"},{"article-title":"Safety gymnasium: A unified safe reinforcement learning benchmark","volume-title":"NeurIPS Datasets and Benchmarks Track","author":"Ji","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1707.06347"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i11.29136"},{"article-title":"Isaac gym: High performance gpu-based physics simulation for robot learning","year":"2021","author":"Makoviychuk","key":"ref45"}],"event":{"name":"2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2024,10,14]]},"location":"Abu Dhabi, United Arab Emirates","end":{"date-parts":[[2024,10,18]]}},"container-title":["2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10801246\/10801290\/10801924.pdf?arnumber=10801924","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,26]],"date-time":"2024-12-26T06:58:57Z","timestamp":1735196337000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10801924\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,14]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/iros58592.2024.10801924","relation":{},"subject":[],"published":{"date-parts":[[2024,10,14]]}}}