{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T10:28:12Z","timestamp":1773829692041,"version":"3.50.1"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,2]],"date-time":"2025-06-02T00:00:00Z","timestamp":1748822400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,2]],"date-time":"2025-06-02T00:00:00Z","timestamp":1748822400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,2]]},"DOI":"10.1109\/cogsima64436.2025.11079473","type":"proceedings-article","created":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T17:41:16Z","timestamp":1752601276000},"page":"127-134","source":"Crossref","is-referenced-by-count":1,"title":["Innate-Values-Driven Reinforcement Learning Based Cognitive Modeling"],"prefix":"10.1109","author":[{"given":"Qin","family":"Yang","sequence":"first","affiliation":[{"name":"Bradley University,Intelligent Social Systems and Swarm Robotics Lab (ISR),Computer Science and Information Systems Department,Peoria,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-65094-4"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.3389\/fnhum.2017.00145"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32375-1_2"},{"key":"ref4","volume-title":"Motivation und Lernen mit Texten","author":"Schiefele","year":"1996"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32375-1_9"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1037\/11305-004"},{"key":"ref7","author":"Alderfer","year":"1972","journal-title":"Existence, relatedness, and growth: Human needs in organizational settings"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-89187-1"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32375-1_1"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13554"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1991.170605"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2010.2056368"},{"key":"ref13","article-title":"A real-time novelty detector for a mobile robot","volume-title":"EUREL European Advanced Robotics Systems Masterclass and Conference","author":"Marsland","year":"2000"},{"key":"ref14","first-page":"19","article-title":"Intrinsically motivated learning of hierarchical collections of skills","volume-title":"Proceedings of the 3rd International Conference on Development and Learning","volume":"112","author":"Barto","year":"2004"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74913-4_30"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/SMC42975.2020.9283249"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3555776.3577642"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3643862"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/SSRR50563.2020.9292570"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/SMC52423.2021.9659187"},{"key":"ref21","article-title":"A survey on intrinsic motivation in reinforcement learning","author":"Aubret","year":"2019","journal-title":"arXiv preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref23","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"International conference on machine learning","author":"Wang","year":"2016"},{"key":"ref24","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"International conference on machine learning","author":"Mnih","year":"2016"},{"key":"ref25","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860433"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2018.2877047"},{"key":"ref28","volume-title":"Markov decision processes: discrete stochastic dynamic programming","author":"Puterman","year":"2014"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21236\/ad0708563"},{"key":"ref30","volume-title":"Nonlinear Preference and Utility Theory","author":"Fishburn","year":"1988"},{"key":"ref31","article-title":"Reinforcement learning for robots using neural networks","author":"Lin","year":"1992","journal-title":"Carnegie Mellon University"}],"event":{"name":"2025 IEEE Conference on Cognitive and Computational Aspects of Situation Management (CogSIMA)","location":"Duisburg, Germany","start":{"date-parts":[[2025,6,2]]},"end":{"date-parts":[[2025,6,5]]}},"container-title":["2025 IEEE Conference on Cognitive and Computational Aspects of Situation Management (CogSIMA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11079407\/11079467\/11079473.pdf?arnumber=11079473","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,16]],"date-time":"2025-07-16T05:40:14Z","timestamp":1752644414000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11079473\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,2]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/cogsima64436.2025.11079473","relation":{},"subject":[],"published":{"date-parts":[[2025,6,2]]}}}