{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T21:54:47Z","timestamp":1757454887329,"version":"3.37.3"},"reference-count":40,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001655","name":"Deutscher Akademischer Austauschdienst DAAD\u2014Research Grants-Doctoral Programme in Germany Scholarship","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001655","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. Syst."],"published-print":{"date-parts":[[2022,12]]},"DOI":"10.1109\/tcds.2020.3001633","type":"journal-article","created":{"date-parts":[[2020,6,11]],"date-time":"2020-06-11T20:43:00Z","timestamp":1591908180000},"page":"1367-1377","source":"Crossref","is-referenced-by-count":4,"title":["Efficient Online Interest-Driven Exploration for Developmental Robots"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7733-4509","authenticated-orcid":false,"given":"Rania","family":"Rayyes","sequence":"first","affiliation":[{"name":"Institut f&#x00FC;r Robotik und Prozessinformatik, Technische Universit&#x00E4;t Braunschweig, Braunschweig, Germany"}]},{"given":"Heiko","family":"Donat","sequence":"additional","affiliation":[{"name":"Institut f&#x00FC;r Robotik und Prozessinformatik, Technische Universit&#x00E4;t Braunschweig, Braunschweig, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6738-9933","authenticated-orcid":false,"given":"Jochen","family":"Steil","sequence":"additional","affiliation":[{"name":"Institut f&#x00FC;r Robotik und Prozessinformatik, Technische Universit&#x00E4;t Braunschweig, Braunschweig, Germany"}]}],"member":"263","reference":[{"year":"2019","journal-title":"PXI Hardware Specification","key":"ref39"},{"doi-asserted-by":"publisher","key":"ref38","DOI":"10.1109\/100.486658"},{"key":"ref33","first-page":"379","article-title":"Learning with the self-organizing map","volume":"1","author":"ritter","year":"1991","journal-title":"Proc ICANN"},{"key":"ref32","first-page":"4344","article-title":"Learning by playing solving sparse reward tasks from scratch","volume":"80","author":"riedmiller","year":"2018","journal-title":"Proc 35th Int Conf Mach Learn"},{"key":"ref31","first-page":"5048","article-title":"Hindsight experience replay","author":"andrychowicz","year":"2017","journal-title":"Proc 30th Adv Neural Inf Process Syst"},{"year":"1993","author":"lin","journal-title":"Reinforcement learning for robots using neural networks","key":"ref30"},{"doi-asserted-by":"publisher","key":"ref37","DOI":"10.1109\/ICRA.2013.6630645"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1007\/978-3-319-15705-4_25"},{"doi-asserted-by":"publisher","key":"ref35","DOI":"10.1109\/ICRA.2019.8794347"},{"doi-asserted-by":"publisher","key":"ref34","DOI":"10.1038\/nature04587"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1109\/DEVLRN.2017.8329804"},{"year":"2019","journal-title":"MoveIt","key":"ref40"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1109\/TAMD.2010.2103311"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/IROS.2018.8593833"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/TAMD.2010.2056368"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1109\/TCDS.2016.2538961"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1016\/j.robot.2012.05.008"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1515\/pjbr-2017-0004"},{"key":"ref17","first-page":"6","article-title":"What is intrinsic motivation? A typology of computational approaches","volume":"1","author":"oudeyer","year":"2009","journal-title":"Front Neurorobot"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.3389\/fnbot.2013.00022"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1007\/s10514-013-9339-y"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.1109\/DEVLRN.2019.8850707"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1007\/s10339-011-0404-1"},{"doi-asserted-by":"publisher","key":"ref27","DOI":"10.3389\/fnbot.2018.00068"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/TAMD.2015.2426192"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/EST.2010.20"},{"year":"2013","author":"mnih","journal-title":"Playing atari with deep reinforcement learning","key":"ref29"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/IROS.2016.7759584"},{"year":"2018","author":"tanneberg","journal-title":"Intrinsic motivation and mental replay enable efficient online adaptation in stochastic recurrent networks","key":"ref8"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1109\/TEVC.2006.890271"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.7551\/mitpress\/9320.001.0001","author":"cangelosi","year":"2015","journal-title":"Developmental Robotics From Babies to Robots"},{"year":"2019","author":"huang","journal-title":"Learning gentle object manipulation with curiosity-driven deep reinforcement learning","key":"ref9"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1080\/09540090600768658"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.3389\/fnins.2014.00317"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1016\/j.tics.2004.04.002"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"1147","DOI":"10.1109\/TNNLS.2013.2287890","article-title":"Efficient exploratory learning of inverse kinematics on a bionic elephant trunk","volume":"25","author":"rolf","year":"2014","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1109\/DEVLRN.2011.6037368"},{"key":"ref23","first-page":"56","article-title":"Goal babbling with direction sampling for simultaneous exploration and learning of inverse kinematics of a humanoid robot","volume":"4","author":"rayyes","year":"2016","journal-title":"Proc WS NC2"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1109\/IROS.2018.8593762"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1109\/DEVLRN.2016.7846793"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7274989\/9976403\/09115051.pdf?arnumber=9115051","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,26]],"date-time":"2022-12-26T19:08:57Z","timestamp":1672081737000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9115051\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12]]},"references-count":40,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2020.3001633","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"type":"print","value":"2379-8920"},{"type":"electronic","value":"2379-8939"}],"subject":[],"published":{"date-parts":[[2022,12]]}}}