{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T12:24:38Z","timestamp":1730204678876,"version":"3.28.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,12,14]],"date-time":"2020-12-14T00:00:00Z","timestamp":1607904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,12,14]],"date-time":"2020-12-14T00:00:00Z","timestamp":1607904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,12,14]],"date-time":"2020-12-14T00:00:00Z","timestamp":1607904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,12,14]]},"DOI":"10.1109\/cdc42340.2020.9303931","type":"proceedings-article","created":{"date-parts":[[2021,1,13]],"date-time":"2021-01-13T07:27:32Z","timestamp":1610522852000},"page":"597-602","source":"Crossref","is-referenced-by-count":2,"title":["Learning Dynamic-Objective Policies from a Class of Optimal Trajectories"],"prefix":"10.1109","author":[{"given":"Christopher Iliffe","family":"Sprague","sequence":"first","affiliation":[]},{"given":"Dario","family":"Izzo","sequence":"additional","affiliation":[]},{"given":"Petter","family":"Ogren","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"kingma","key":"ref31"},{"key":"ref30","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1016\/0771-050X(80)90013-3","article-title":"A family of embedded runge-kutta formulae","volume":"6","author":"dormand","year":"1980","journal-title":"Journal of Computational and Applied Mathematics"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-41508-6_6"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2017.2779442"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2009.2017934"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2016.7510253"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594266"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2800124"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2014.10.128"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2013.11.019"},{"journal-title":"REINFORCEMENT LEARNING AND OPTIMAL CONTROL","year":"2019","author":"bertsekas","key":"ref18"},{"article-title":"Reinforcement learning: An introduction","year":"2011","author":"sutton","key":"ref19"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s10957-008-9387-1"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref4"},{"journal-title":"The Mathematical Theory of Optimal Processes","year":"1962","author":"pontryagin","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.jprocont.2006.06.001"},{"key":"ref6","first-page":"385","article-title":"Multiobjective reinforcement learning: A comprehensive overview","volume":"45","author":"liu","year":"2014","journal-title":"IEEE Transactions on Systems Man and Cybernetics Systems"},{"article-title":"Adding neural network 
controllers to behavior trees without destroying performance guarantees","year":"2018","author":"sprague","key":"ref29"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.2514\/1.G002357"},{"article-title":"Dynamic weights in multi-objective deep reinforcement learning","year":"2018","author":"abels","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/1102351.1102427"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref2"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2017.2688328"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898718577"},{"key":"ref20","first-page":"1329","article-title":"Benchmarking deep reinforcement learning for continuous control","author":"duan","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1115\/DSCC2018-9249"},{"key":"ref21","first-page":"6765","article-title":"Inverse reward design","author":"hadfield-menell","year":"2017","journal-title":"Advances in neural information processing systems"},{"article-title":"Machine learning and evolutionary techniques in interplanetary trajectory design","year":"2018","author":"izzo","key":"ref24"},{"article-title":"Fast policy learning through imitation and reinforcement","year":"2018","author":"cheng","key":"ref23"},{"article-title":"Adding neural network controllers to behavior trees without destroying performance guarantees","year":"2018","author":"sprague","key":"ref26"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s42064-019-0054-0"}],"event":{"name":"2020 59th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2020,12,14]]},"location":"Jeju, Korea (South)","end":{"date-parts":[[2020,12,18]]}},"container-title":["2020 59th IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9303728\/9303729\/09303931.pdf?arnumber=9303931","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T15:59:11Z","timestamp":1656345551000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9303931\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,14]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/cdc42340.2020.9303931","relation":{},"subject":[],"published":{"date-parts":[[2020,12,14]]}}}
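The record above is a Crossref REST API "work" object for the DOI 10.1109/cdc42340.2020.9303931. A minimal sketch of retrieving and parsing such a record follows, assuming network access and the third-party requests package; it relies only on fields that appear in the record above (the "message" envelope, "title", "author", "event", "page", "is-referenced-by-count", and "reference").

```python
import requests

DOI = "10.1109/cdc42340.2020.9303931"

# Crossref serves work metadata at https://api.crossref.org/works/<DOI>,
# wrapped in the same {"status", "message-type", "message"} envelope as above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object holds the metadata

# Pull out a few fields present in the record above.
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
event = work.get("event", {}).get("name", "")
pages = work.get("page", "")

print(f'{authors}. "{title}". {event}, pp. {pages}.')
print("Cited by:", work.get("is-referenced-by-count"))

# Each entry in work["reference"] is one cited item; an entry carries either
# a resolved DOI ("doi-asserted-by" + "DOI") or unstructured fields such as
# "article-title", "author", and "year".
for ref in work.get("reference", []):
    print(ref.get("key"), ref.get("DOI") or ref.get("article-title"))
```

Note the mixed reference shapes in the loop: DOI-only entries (e.g. "ref10") resolve through the "DOI" field, while unstructured entries (e.g. "ref31") only expose free-text bibliographic fields, so robust consumers should handle both.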