{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T04:54:05Z","timestamp":1767675245847,"version":"3.37.3"},"reference-count":79,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100002341","name":"Academy of Finland","doi-asserted-by":"publisher","award":["299358"],"award-info":[{"award-number":["299358"]}],"id":[{"id":"10.13039\/501100002341","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006136","name":"Technology Industries of Finland Centennial Foundation","doi-asserted-by":"crossref","id":[{"id":"10.13039\/501100006136","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],"published-print":{"date-parts":[[2022,12,1]]},"DOI":"10.1109\/tvcg.2021.3100095","type":"journal-article","created":{"date-parts":[[2021,7,27]],"date-time":"2021-07-27T20:26:18Z","timestamp":1627417578000},"page":"4700-4712","source":"Crossref","is-referenced-by-count":2,"title":["Learning Task-Agnostic Action Spaces for Movement Optimization"],"prefix":"10.1109","volume":"28","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4930-9917","authenticated-orcid":false,"given":"Amin","family":"Babadi","sequence":"first","affiliation":[{"name":"Department of Computer Science, Aalto University, Espoo, Finland"}]},{"given":"Michiel","family":"van de Panne","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of British Columbia, Vancouver, BC, Canada"}]},{"given":"C. 
Karen","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Stanford University, Stanford, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7764-3459","authenticated-orcid":false,"given":"Perttu","family":"Hamalainen","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Aalto University, Espoo, Finland"}]}],"member":"263","reference":[{"article-title":"PPO-CMA: Proximal policy optimization with covariance matrix adaptation","year":"2018","author":"h\u00e4m\u00e4l\u00e4inen","key":"ref73"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368199"},{"article-title":"Learning predictive models from observation and interaction","year":"2019","author":"schmeckpeper","key":"ref71"},{"article-title":"Learning to reach goals via iterated supervised learning","year":"2019","author":"ghosh","key":"ref70"},{"article-title":"Stable baselines","year":"2018","author":"hill","key":"ref76"},{"key":"ref77","first-page":"6389","article-title":"Visualizing the loss landscape of neural nets","author":"li","year":"2018","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref74","article-title":"TensorFlow: Large-scale machine learning on heterogeneous systems","author":"abadi","year":"2015","journal-title":"software available from tensorflow org"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2014.X.052"},{"article-title":"Searching for activation functions","year":"2017","author":"ramachandran","key":"ref75"},{"key":"ref38","first-page":"1","article-title":"Guided policy search","author":"levine","year":"2013","journal-title":"Proc Int Conf Mach Learn"},{"article-title":"Deep residual mixture models","year":"2020","author":"h\u00e4m\u00e4l\u00e4inen","key":"ref78"},{"article-title":"Conservative Q-learning for offline reinforcement learning","year":"2020","author":"kumar","key":"ref79"},{"article-title":"Hierarchical visuomotor control of humanoids","year":"2018","author":"merel","key":"ref33"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3386569.3392433"},{"article-title":"Learning human behaviors from motion capture by adversarial imitation","year":"2017","author":"merel","key":"ref31"},{"key":"ref30","first-page":"3303","article-title":"Data-efficient hierarchical reinforcement learning","author":"nachum","year":"2018","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/3355089.3356501"},{"key":"ref36","first-page":"4105","article-title":"CoMic: Complementary task learning & mimicry for reusable skills","author":"hasenclever","year":"2020","journal-title":"Proc Int Conf Mach Learn"},{"article-title":"Neural probabilistic motor primitives for humanoid control","year":"2018","author":"merel","key":"ref35"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3386569.3392474"},{"article-title":"OpenAI baselines","year":"2017","author":"dhariwal","key":"ref60"},{"article-title":"Unity: A general platform for intelligent agents","year":"2018","author":"juliani","key":"ref62"},{"article-title":"Tensorflow agents: Efficient batched reinforcement learning in tensorflow","year":"2017","author":"hafner","key":"ref61"},{"article-title":"Tonic: A deep reinforcement learning library for fast prototyping and benchmarking","year":"2020","author":"pardo","key":"ref63"},{"article-title":"Near-optimal representation learning for hierarchical reinforcement 
learning","year":"2018","author":"nachum","key":"ref28"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2015","author":"schulman","key":"ref64"},{"key":"ref27","first-page":"3681","article-title":"MCP: Learning composable hierarchical control with multiplicative compositional policies","author":"peng","year":"2019","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"article-title":"OpenAI gym","year":"2016","author":"brockman","key":"ref66"},{"key":"ref29","first-page":"5048","article-title":"Hindsight experience replay","author":"andrychowicz","year":"2017","journal-title":"Proc Int Conf Neural Inf Process"},{"article-title":"Emergence of locomotion behaviours in rich environments","year":"2017","author":"heess","key":"ref67"},{"article-title":"Unsupervised meta-learning for reinforcement learning","year":"2018","author":"gupta","key":"ref68"},{"key":"ref69","first-page":"4754","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics models","author":"chua","year":"2018","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073707"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386025"},{"key":"ref20","first-page":"271","article-title":"Feudal reinforcement learning","author":"dayan","year":"1993","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1613\/jair.639"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/2897824.2925881"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/2766910"},{"key":"ref26","doi-asserted-by":"crossref","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.2990496"},{"article-title":"Dream to control: Learning behaviors by latent imagination","year":"2019","author":"hafner","key":"ref50"},{"article-title":"Planning to explore via self-supervised world models","year":"2020","author":"sekar","key":"ref51"},{"article-title":"Learning agile robotic locomotion skills by imitating animals","year":"2020","author":"peng","key":"ref59"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref58"},{"key":"ref57","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","author":"fujimoto","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","year":"2018","author":"haarnoja","key":"ref56"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref55"},{"journal-title":"Reinforcement Learning An 
Introduction","year":"2018","author":"sutton","key":"ref54"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2018.2849386"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1145\/2366145.2366173"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3306346.3322972"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2020.3018187"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3099564.3099579"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/1276377.1276509"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/1360612.1360680"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/2508363.2508399"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-32494-1_4"},{"key":"ref16","doi-asserted-by":"crossref","DOI":"10.1145\/2185520.2185539","article-title":"Discovery of complex behaviors through contact-invariant optimization","volume":"31","author":"mordatch","year":"2012","journal-title":"ACM Trans Graph"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.13513"},{"key":"ref18","doi-asserted-by":"crossref","DOI":"10.1145\/2601097.2601218","article-title":"Online motion synthesis using sequential Monte Carlo","volume":"33","author":"h\u00e4m\u00e4l\u00e4inen","year":"2014","journal-title":"ACM Trans Graph"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2018.8490407"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"143:1","DOI":"10.1145\/3197517.3201311","article-title":"DeepMimic: Example-guided deep reinforcement learning of physics-based character skills","volume":"37","author":"peng","year":"2018","journal-title":"ACM Trans Graph"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3355089.3356536"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3099564.3099567"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3386569.3392381"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073602"},{"key":"ref7","article-title":"Deep neuroethology of a virtual rodent","author":"merel","year":"2020","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref49","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"2016","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref9","article-title":"Learning multi-level hierarchies with hindsight","author":"levy","year":"2019","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref46","article-title":"Regularizing model-based planning with energy-based models","author":"boney","year":"2019","journal-title":"Proc Conf Robot Learn"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487172"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2015.05.002"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-40935-6_12"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3359566.3360072"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3130800.3130833"},{"key":"ref44","article-title":"Model based reinforcement learning for Atari","author":"kaiser","year":"2020","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/504729.504754"}],"container-title":["IEEE Transactions on Visualization and Computer 
Graphics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/2945\/9930678\/09497687.pdf?arnumber=9497687","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,6]],"date-time":"2022-12-06T23:43:51Z","timestamp":1670370231000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9497687\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12,1]]},"references-count":79,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tvcg.2021.3100095","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"type":"print","value":"1077-2626"},{"type":"electronic","value":"1941-0506"},{"type":"electronic","value":"2160-9306"}],"subject":[],"published":{"date-parts":[[2022,12,1]]}}}