{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,26]],"date-time":"2025-04-26T05:07:16Z","timestamp":1745644036579,"version":"3.28.0"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,13]],"date-time":"2023-12-13T00:00:00Z","timestamp":1702425600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,13]],"date-time":"2023-12-13T00:00:00Z","timestamp":1702425600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100010661","name":"European Union's H2020 Framework Programme","doi-asserted-by":"publisher","award":["H2020\/2014-2020"],"award-info":[{"award-number":["H2020\/2014-2020"]}],"id":[{"id":"10.13039\/100010661","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["SHF-2048094,CNS-1932620,CNS-2039087,FMitF-1837131,CCF-SHF-1932620"],"award-info":[{"award-number":["SHF-2048094,CNS-1932620,CNS-2039087,FMitF-1837131,CCF-SHF-1932620"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,13]]},"DOI":"10.1109\/cdc49753.2023.10383559","type":"proceedings-article","created":{"date-parts":[[2024,1,19]],"date-time":"2024-01-19T18:38:36Z","timestamp":1705689516000},"page":"6834-6840","source":"Crossref","is-referenced-by-count":3,"title":["Model-Free Reinforcement Learning for Spatiotemporal Tasks Using Symbolic Automata"],"prefix":"10.1109","author":[{"given":"Anand","family":"Balakrishnan","sequence":"first","affiliation":[{"name":"University of Southern California,Los Angeles,California,USA"}]},{"given":"Stefan","family":"Jak\u0161i\u0107","sequence":"additional","affiliation":[{"name":"AIT Austrian Institute of Technology GmbH,Vienna,Austria"}]},{"given":"Edgar A.","family":"Aguilar","sequence":"additional","affiliation":[{"name":"AIT Austrian Institute of Technology GmbH,Vienna,Austria"}]},{"given":"Dejan","family":"Ni\u010dkovi\u0107","sequence":"additional","affiliation":[{"name":"AIT Austrian Institute of Technology GmbH,Vienna,Austria"}]},{"given":"Jyotirmoy V.","family":"Deshmukh","sequence":"additional","affiliation":[{"name":"University of Southern California,Los Angeles,California,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1983.6313077"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.1995.478953"},{"key":"ref3","article-title":"Reinforcement learning: An introduction","volume-title":"Adaptive Computation and Machine Learning Series","author":"Sutton","year":"2018"},{"journal-title":"Concrete Problems in AI Safety","year":"2016","author":"Amodei","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2009.2030225"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/robot.2010.5509686"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2014.x.039"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2014.7039527"},{"journal-title":"Logically-Constrained Reinforcement Learning","year":"2018","author":"Hasanbeig","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-17462-0_27"},{"journal-title":"Automata-Guided Hierarchical Reinforcement Learning for Skill Composition","year":"2018","author":"Li","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCPS48487.2020.00017"},{"key":"ref13","first-page":"2107","article-title":"Using Reward Machines for High-Level Task Specification and Decomposition in Reinforcement Learning","volume-title":"Proceedings of the 35th International Conference on Machine Learning","author":"Icarte","year":"2018"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/840"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.tcs.2009.06.021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15297-9_9"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2016.7799279"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206234"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968254"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-63387-9_3"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TCAD.2018.2858460"},{"key":"ref22","article-title":"Neuro-Dynamic Programming, ser","volume-title":"Optimization and Neural Computation Series","author":"Bertsekas","year":"1996"},{"key":"ref23","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"ICML, ser. ICML 99","volume":"99","author":"Ng","year":"1999"},{"key":"ref24","first-page":"565","article-title":"Reward Shaping in Episodic Reinforcement Learning","volume-title":"Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, ser. AAMAS 17","author":"Grze\u015b","year":"2017"},{"issue":"3","key":"ref25","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-Learning","volume":"8","author":"Watkins","year":"1992","journal-title":"Machine Learning"},{"key":"ref26","article-title":"Compositional Reinforcement Learning from Logical Specifications","volume":"34","author":"Jothimurugan","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/507"},{"key":"ref28","article-title":"A Composable Specification Language for Reinforcement Learning Tasks","volume":"32","author":"Jothimurugan","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12440"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2018.8431181"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2020.xvi.088"},{"key":"ref32","article-title":"A Lyapunov-based Approach to Safe Reinforcement Learning","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Chow","year":"2018"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967820"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2021.3049917"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aay6276"}],"event":{"name":"2023 62nd IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2023,12,13]]},"location":"Singapore, Singapore","end":{"date-parts":[[2023,12,15]]}},"container-title":["2023 62nd IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10383192\/10383193\/10383559.pdf?arnumber=10383559","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,23]],"date-time":"2024-01-23T16:35:03Z","timestamp":1706027703000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10383559\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,13]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/cdc49753.2023.10383559","relation":{},"subject":[],"published":{"date-parts":[[2023,12,13]]}}}