{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,22]],"date-time":"2024-10-22T21:01:28Z","timestamp":1729630888972,"version":"3.28.0"},"reference-count":17,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/Crown.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/ijcnn48605.2020.9207473","type":"proceedings-article","created":{"date-parts":[[2020,9,30]],"date-time":"2020-09-30T00:40:33Z","timestamp":1601426433000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Automatic Policy Decomposition through Abstract State Space Dynamic Specialization"],"prefix":"10.1109","author":[{"given":"Rene","family":"Sturgeon","sequence":"first","affiliation":[]},{"given":"Francois","family":"Rivest","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"3675","author":"kulkarni","year":"2016","journal-title":"Hierarchical Deep Reinforcement Learning Integrating Temporal Abstraction and Intrinsic Motivation"},{"key":"ref11","first-page":"5392","article-title":"Hybrid reward architecture for reinforcement learning","author":"van seijen","year":"2017","journal-title":"Advances in NIPS"},{"key":"ref12","first-page":"1588","author":"mankowitz","year":"2016","journal-title":"Adaptive Skills Adaptive Partitions (ASAP)"},{"key":"ref13","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2016","journal-title":"Proceedings of the 33rd International Conference on Machine Learning"},{"key":"ref14","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proceedings of the 33rd International Conference on Machine Learning"},{"key":"ref15","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"2010","journal-title":"Proceedings of the International Conference on Artificial Intelligence and Statistics (AISTATS&#x2019;10) Society for Artificial Intelligence and Statistics"},{"key":"ref16","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"International Conference on Learning Representations"},{"key":"ref17","article-title":"Automatic option discovery within non-stationary environments","author":"sturgeon","year":"2018","journal-title":"PhD"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2800101"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v31i1.10916","article-title":"The option-critic architecture","author":"bacon","year":"2017","journal-title":"Thirty-First AAAI Conference on Artificial Intelligence"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"3582","DOI":"10.1609\/aaai.v33i01.33013582","article-title":"Combined reinforcement learning via abstract representations","volume":"33","author":"fran\u00e7ois-lavet","year":"2019","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1080\/01691864.2017.1365009"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref9","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11831","article-title":"When waiting is not an option: Learning options with a deliberation cost","author":"harb","year":"2018","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"}],"event":{"name":"2020 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2020,7,19]]},"location":"Glasgow, United Kingdom","end":{"date-parts":[[2020,7,24]]}},"container-title":["2020 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9200848\/9206590\/09207473.pdf?arnumber=9207473","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,21]],"date-time":"2022-11-21T06:23:20Z","timestamp":1669011800000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9207473\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/ijcnn48605.2020.9207473","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}