{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,3]],"date-time":"2025-11-03T13:35:54Z","timestamp":1762176954741,"version":"3.37.3"},"reference-count":101,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"DOI":"10.13039\/501100000780","name":"European Commission","doi-asserted-by":"publisher","award":["FP7\/2007\u20132013"],"award-info":[{"award-number":["FP7\/2007\u20132013"]}],"id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"publisher"}]},{"name":"ICT Challenge 2 \u201cCognitive Systems and Robotics\u201d through the Project \u201cIM-CLeVeR\u2014Intrinsically Motivated Cumulative Learning Versatile Robots\u201d","award":["ICT-IP-231722"],"award-info":[{"award-number":["ICT-IP-231722"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. Syst."],"published-print":{"date-parts":[[2019,6]]},"DOI":"10.1109\/tcds.2016.2607018","type":"journal-article","created":{"date-parts":[[2016,10,17]],"date-time":"2016-10-17T20:38:41Z","timestamp":1476736721000},"page":"292-317","source":"Crossref","is-referenced-by-count":19,"title":["A Reinforcement Learning Architecture That Transfers Knowledge Between Skills When Solving Multiple Tasks"],"prefix":"10.1109","volume":"11","author":[{"given":"Paolo","family":"Tommasino","sequence":"first","affiliation":[{"name":"Robotics Research Centre, Nanyang Technological University, Singapore"}]},{"given":"Daniele","family":"Caligiore","sequence":"additional","affiliation":[{"name":"Laboratory of Computational Embodied Neuroscience, Istituto di Scienze e Tecnologie della Cognizione, Consiglio Nazionale delle Ricerce, Rome, Italy"}]},{"given":"Marco","family":"Mirolli","sequence":"additional","affiliation":[{"name":"Laboratory of Computational Embodied Neuroscience, Istituto di Scienze e Tecnologie della Cognizione, Consiglio Nazionale delle Ricerce, Rome, Italy"}]},{"given":"Gianluca","family":"Baldassarre","sequence":"additional","affiliation":[{"name":"Laboratory of Computational Embodied Neuroscience, Istituto di Scienze e Tecnologie della Cognizione, Consiglio Nazionale delle Ricerce, Rome, Italy"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1080\/09540091003682553"},{"journal-title":"Animal Behavior","year":"1993","author":"mcfarland","key":"ref38"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1177\/105971230501300205"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/S0893-6080(02)00047-3"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/S0959-4388(00)00153-7"},{"key":"ref30","first-page":"215","article-title":"Adaptive critics and the basal ganglia","author":"barto","year":"1995","journal-title":"Models of Information Processing in the Basal Ganglia"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1207\/s15516709cog1502_2"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.79"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/S0896-6273(02)00967-4"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1126\/science.275.5306.1593"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1996.8.5.895"},{"key":"ref27","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1983.6313077"},{"key":"ref20","first-page":"27","article-title":"A bioinspired hierarchical reinforcement learning architecture for modeling learning of multiple skills with continuous states and actions","volume":"149","author":"caligiore","year":"2010","journal-title":"Proc 10th Int Conf Epigenetic Robot"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1177\/1059712314539710"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/DevLrn.2012.6400871"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref23","first-page":"1038","article-title":"Generalization in reinforcement learning: Successful examples using sparse coarse coding","author":"sutton","year":"1996","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.09.015"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_7"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1016\/S0301-0082(96)00042-1"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_18"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1016\/S0896-6273(02)01003-6"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/j.neubiorev.2013.01.012"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2010.5578840"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4471-0449-0"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/BF00204593"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_1"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1162\/089976600300015961"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1037\/a0020887"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2004.02.004"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1146\/annurev.neuro.24.1.167"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1093\/acprof:osobl\/9780199552917.001.0001"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/0003-3472(79)90006-X"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/BF00229422"},{"key":"ref6","first-page":"1633","article-title":"Transfer learning for reinforcement learning domains: A survey","volume":"10","author":"taylor","year":"2009","journal-title":"J Mach Learn Res"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.earlhumdev.2003.09.006"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/S1364-6613(99)01294-2"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1016\/S0896-6273(01)00423-8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/S0079-7421(08)60536-8"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1126\/science.291.5504.599"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1152\/jn.00795.2010"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1093\/acprof:oso\/9780195326703.001.0001"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1006\/nlme.1998.3843"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/PL00007984"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2011.6037326"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2010.00200"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0003775"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/DevLrn.2012.6400883"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1162\/106454600568320"},{"key":"ref72","first-page":"1452","article-title":"Q-error as a selection mechanism in modular reinforcement-learning systems","volume":"22","author":"ring","year":"2011","journal-title":"Proc Int Joint Conf Artif Intell (IJCAI)"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1162\/NECO_a_00246"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1162\/089976602753712972"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-008-5061-y"},{"key":"ref77","first-page":"1679","article-title":"Learning parameterized skills","author":"da silva","year":"2012","journal-title":"Proc 29th Int Conf Mach Learn (ICML)"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-39802-5_7"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-39802-5_8"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2014.6907629"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/1273496.1273624"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1037\/a0037016"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1023\/A:1025696116075"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-77296-5_32"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref64","first-page":"895","article-title":"Building portable options: Skill transfer in reinforcement learning","volume":"7","author":"konidaris","year":"2007","journal-title":"Proc 20th Int Joint Conf Artif Intell"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref66","first-page":"761","article-title":"Horde: A scalable real-time architecture for learning knowledge from unsupervised sensorimotor interaction","author":"sutton","year":"2011","journal-title":"Proc 10th Int Conf Auton Agents Multiagent Syst (AAMAS)"},{"key":"ref67","first-page":"720","article-title":"Probabilistic policy reuse in a reinforcement learning agent","author":"fern\u00e1ndez","year":"2006","journal-title":"Proc Int Joint Conf Autonomous Agents and Multiagent Systems"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390225"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s00221-005-0169-9"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1016\/S0893-6080(98)00066-5"},{"journal-title":"The Origins of Intelligence in Children","year":"1953","author":"piaget","key":"ref1"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.3389\/fpsyg.2014.00124"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.12.012"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-39875-9"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-39875-9_8"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/9780262016964.001.0001"},{"journal-title":"The Computational Neurobiology of Reaching and Pointing","year":"2005","author":"shadmehr","key":"ref90"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1016\/j.cognition.2008.08.011"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1007\/11840541_33"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1146\/annurev.ne.09.030186.002041"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1038\/nrn1919"},{"key":"ref10","first-page":"112","article-title":"Intrinsically motivated learning of hierarchical collections of skills","author":"barto","year":"2004","journal-title":"Proc Int Conf Develop Learn (ICDL'06)"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21236\/ADA440079"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2006.890271"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/0921-8890(95)00004-Y"},{"key":"ref14","first-page":"11","article-title":"What are the key open challenges for understanding the autonomous cumulative learning of skills?","volume":"7","author":"baldassarre","year":"2010","journal-title":"Newslett Autonom Mental Develop Tech Committee"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_5"},{"journal-title":"Animal Behavior an Evolutionary Approach","year":"1998","author":"alcock","key":"ref16"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2016.2538961"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992700"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2014.6983020"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/S1389-0417(01)00039-0"},{"article-title":"Automatic discovery of subgoals in reinforcement learning using diverse density","year":"2001","author":"mcgovern","key":"ref84"},{"article-title":"Planning with neural networks and reinforcement learning","year":"2002","author":"baldassarre","key":"ref19"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32375-1"},{"article-title":"Mixture models","year":"2008","author":"jacobs","key":"ref80"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1523\/JNEUROSCI.14-05-03208.1994"},{"key":"ref85","first-page":"1015","article-title":"Skill discovery in continuous reinforcement learning domains using skill chaining","author":"konidaris","year":"2009","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/DevLrn.2012.6400835"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.3389\/fnbot.2013.00022"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1037\/0278-7393.5.2.179"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7274989\/8733947\/07592409.pdf?arnumber=7592409","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,9]],"date-time":"2024-05-09T17:41:09Z","timestamp":1715276469000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/7592409\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,6]]},"references-count":101,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2016.2607018","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"type":"print","value":"2379-8920"},{"type":"electronic","value":"2379-8939"}],"subject":[],"published":{"date-parts":[[2019,6]]}}}