{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,25]],"date-time":"2025-08-25T21:10:06Z","timestamp":1756156206829,"version":"3.44.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,11]]},"DOI":"10.1109\/iros40897.2019.8968248","type":"proceedings-article","created":{"date-parts":[[2020,1,30]],"date-time":"2020-01-30T23:53:51Z","timestamp":1580428431000},"page":"3474-3480","source":"Crossref","is-referenced-by-count":2,"title":["TendencyRL: Multi-stage Discriminative Hints for Efficient Goal-Oriented Reverse Curriculum Learning"],"prefix":"10.1109","author":[{"given":"Chen","family":"Wang","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Junfeng","family":"Ding","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Xiangyu","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Zelin","family":"Ye","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Jialu","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Ziruo","family":"Cai","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]},{"given":"Cewu","family":"Lu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,Department of Computer Science"}]}],"member":"263","reference":[{"key":"ref30","first-page":"91","article-title":"Faster r-cnn: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref10","article-title":"Incentivizing exploration in reinforcement learning with deep predictive models","author":"stadie","year":"2015","journal-title":"arXiv preprint arXiv 1507 00814"},{"key":"ref11","first-page":"1109","article-title":"Vime: Variational information maximizing exploration","author":"houthooft","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref12","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref13","first-page":"2125","article-title":"Variational information maximisation for intrinsically motivated reinforcement learning","author":"mohamed","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref14","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"2000","journal-title":"ICML"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref16","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume":"8","author":"ziebart","year":"2008","journal-title":"AAAI"},{"key":"ref17","first-page":"3352","article-title":"Reinforcement learning from demonstration through shaping","author":"brys","year":"2015","journal-title":"IJCAI"},{"key":"ref18","first-page":"1913","article-title":"Shaping mario with human advice","author":"harutyunyan","year":"2015","journal-title":"Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems International Foundation for Autonomous Agents and Multiagent Systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-012-0160-0"},{"year":"2017","key":"ref28"},{"key":"ref4","first-page":"325","article-title":"Curriculum learning for motor skills","author":"karpathy","year":"2012","journal-title":"Canadian Conference on Artificial Intelligence"},{"year":"2012","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202244"},{"key":"ref6","article-title":"Intrinsic motivation and automatic curricula via asymmetric self-play","author":"sukhbaatar","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref5","article-title":"Robust adversarial reinforcement learning","author":"pinto","year":"2017","journal-title":"arXiv preprint arXiv 1703 04816"},{"key":"ref8","first-page":"482","article-title":"Reverse curriculum generation for reinforcement learning","volume":"78","author":"florensa","year":"2017","journal-title":"Proceedings of the 1st Annual Conference on Robot Learning ser Proceedings of Machine Learning Research"},{"journal-title":"Cs234 project final report Approaches to hierarchical reinforcement learning","year":"2017","author":"wulfe","key":"ref7"},{"key":"ref2","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"The Journal of Machine Learning Research"},{"key":"ref9","article-title":"Stochastic neural networks for hierarchical reinforcement learning","author":"florensa","year":"2017","journal-title":"arXiv preprint arXiv 1704 03012"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2015.7138994"},{"key":"ref20","article-title":"One-shot visual imitation learning via meta-learning","author":"finn","year":"2017","journal-title":"arXiv preprint arXiv 1709 04396"},{"key":"ref22","article-title":"Reverse curriculum generation for reinforcement learning","author":"florensa","year":"2017","journal-title":"arXiv preprint arXiv 1707 06892"},{"key":"ref21","first-page":"1087","article-title":"One-shot imitation learning","author":"duan","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref23","article-title":"Hierarchical reinforcement learning for robot navigation","author":"bischoff","year":"2013","journal-title":"Proceedings of the European Symposium on Artificial Neural Networks Computational Intelligence and Machine Learning (ESANN 2013)"},{"key":"ref26","article-title":"Openai gym","author":"brockman","year":"2016","journal-title":"arXiv preprint arXiv 1606 01540"},{"key":"ref25","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"}],"event":{"name":"2019 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2019,11,3]]},"location":"Macau, China","end":{"date-parts":[[2019,11,8]]}},"container-title":["2019 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8957008\/8967518\/08968248.pdf?arnumber=8968248","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,25]],"date-time":"2025-08-25T20:36:22Z","timestamp":1756154182000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8968248\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,11]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/iros40897.2019.8968248","relation":{},"subject":[],"published":{"date-parts":[[2019,11]]}}}