{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T00:03:53Z","timestamp":1756771433274,"version":"3.44.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,12]]},"DOI":"10.1109\/robio49542.2019.8961391","type":"proceedings-article","created":{"date-parts":[[2020,1,21]],"date-time":"2020-01-21T14:49:51Z","timestamp":1579618191000},"page":"148-153","source":"Crossref","is-referenced-by-count":5,"title":["Efficient Robotic Task Generalization Using Deep Model Fusion Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Tianying","family":"Wang","sequence":"first","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Hao","family":"Zhang","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Wei Qi","family":"Toh","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Hongyuan","family":"Zhu","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Cheston","family":"Tan","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Yan","family":"Wu","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Yong","family":"Liu","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]},{"given":"Wei","family":"Jing","sequence":"additional","affiliation":[{"name":"A*STAR Artificial Intelligence Initiative (A*AI),Singapore,138632"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2018.8664803"},{"key":"ref11","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Advances in neural information processing systems"},{"key":"ref12","volume":"2","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref13","first-page":"387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"0"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref14"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref15"},{"key":"ref16","article-title":"Prioritized experience replay","author":"schaul","year":"2016","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref17","first-page":"5048","article-title":"Hindsight experience replay","author":"andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref18","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2010.5509181"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593654"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793659"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793485"},{"key":"ref8","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v30i1.10295","article-title":"Deep reinforcement learning with double q-learning","author":"van hasselt","year":"2016","journal-title":"THIRTIETH AAAI Conference on Artificial Intelligence"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-017-0468-y"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"article-title":"A survey on policy search algorithms for learning robot controllers in a handful of trials","year":"2018","author":"chatzilygeroudis","key":"ref9"},{"article-title":"Generalizing skills with semi-supervised reinforcement learning","year":"2016","author":"finn","key":"ref20"},{"article-title":"Openai gym","year":"2016","author":"brockman","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2017.8324510"},{"key":"ref24","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11694","article-title":"Deep reinforcement learning that matters","author":"henderson","year":"2018","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"}],"event":{"name":"2019 IEEE International Conference on Robotics and Biomimetics (ROBIO)","start":{"date-parts":[[2019,12,6]]},"location":"Dali, China","end":{"date-parts":[[2019,12,8]]}},"container-title":["2019 IEEE International Conference on Robotics and Biomimetics (ROBIO)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8953068\/8961374\/08961391.pdf?arnumber=8961391","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T19:21:49Z","timestamp":1756754509000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8961391\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,12]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/robio49542.2019.8961391","relation":{},"subject":[],"published":{"date-parts":[[2019,12]]}}}