{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T12:32:44Z","timestamp":1761395564430,"version":"3.28.0"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,7]]},"DOI":"10.1109\/icarm.2019.8833960","type":"proceedings-article","created":{"date-parts":[[2019,9,12]],"date-time":"2019-09-12T20:27:41Z","timestamp":1568320061000},"page":"786-792","source":"Crossref","is-referenced-by-count":5,"title":["Online Multi-modal Imitation Learning via Lifelong Intention Encoding"],"prefix":"10.1109","author":[{"given":"Songhao","family":"Piao","sequence":"first","affiliation":[]},{"given":"Yue","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Huaping","family":"Liu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Task transfer by preference-based cost learning","author":"jing","year":"2018","journal-title":"arXiv preprint arXiv 1805 04686"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/860575.860614"},{"key":"ref12","article-title":"Stochastic neural networks for hierarchical reinforcement learning","author":"florensa","year":"2017","journal-title":"arXiv preprint arXiv 1704 03012"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1177\/0278364908091153"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.009"},{"key":"ref15","first-page":"1","article-title":"Surface material retrieval using weakly paired cross-modal learning","author":"liu","year":"2018","journal-title":"IEEE Transactions on Automation Science and Engineering"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593634"},{"key":"ref17","first-page":"4507","article-title":"Lifelong inverse reinforcement learning","author":"mendez","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref18","first-page":"1235","article-title":"Multi-modal imitation learning from unstructured demonstrations using generative adversarial nets","author":"hausman","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref19","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv preprint arXiv 1707 06347"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1997.614389"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref6","first-page":"2","article-title":"Algorithms for inverse reinforcement learning","volume":"1","author":"ng","year":"2000","journal-title":"ICML"},{"key":"ref5","first-page":"897","article-title":"Apprenticeship learning about multiple intentions","author":"babes","year":"2011","journal-title":"Proceedings of the 28th International Conference on Machine Learning (ICML-11)"},{"key":"ref8","first-page":"19","article-title":"Nonlinear inverse reinforcement learning with gaussian processes","author":"levine","year":"2011","journal-title":"Advances in neural information processing systems"},{"key":"ref7","first-page":"49","article-title":"Guided cost learning: Deep inverse optimal control via policy optimization","author":"finn","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref2","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume":"8","author":"ziebart","year":"2008","journal-title":"AAAI"},{"key":"ref1","first-page":"4565","article-title":"Generative adversarial imitation learning","author":"ho","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref20","first-page":"i","article-title":"Ella: an efficient lifelong learning algorithm","author":"ruvolo","year":"2013","journal-title":"International Conference on International Conference on Machine Learning"},{"key":"ref21","first-page":"1206","article-title":"Online multi-task learning for policy gradient methods","author":"ammar","year":"2014","journal-title":"International Conference on Machine Learning"}],"event":{"name":"2019 IEEE 4th International Conference on Advanced Robotics and Mechatronics (ICARM)","start":{"date-parts":[[2019,7,3]]},"location":"Toyonaka, Japan","end":{"date-parts":[[2019,7,5]]}},"container-title":["2019 IEEE 4th International Conference on Advanced Robotics and Mechatronics (ICARM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8826169\/8833630\/08833960.pdf?arnumber=8833960","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,19]],"date-time":"2022-07-19T16:18:08Z","timestamp":1658247488000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8833960\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,7]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/icarm.2019.8833960","relation":{},"subject":[],"published":{"date-parts":[[2019,7]]}}}