{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T11:53:04Z","timestamp":1774353184701,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,9,1]],"date-time":"2021-09-01T00:00:00Z","timestamp":1630454400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Syst. Man Cybern, Syst."],"published-print":{"date-parts":[[2021,9]]},"DOI":"10.1109\/tsmc.2019.2957051","type":"journal-article","created":{"date-parts":[[2019,12,18]],"date-time":"2019-12-18T20:52:16Z","timestamp":1576702336000},"page":"5773-5784","source":"Crossref","is-referenced-by-count":12,"title":["Self-Attention-Based Temporary Curiosity in Reinforcement Learning Exploration"],"prefix":"10.1109","volume":"51","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9825-8186","authenticated-orcid":false,"given":"Hangkai","family":"Hu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7361-9283","authenticated-orcid":false,"given":"Shiji","family":"Song","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7251-0988","authenticated-orcid":false,"given":"Gao","family":"Huang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1053"},{"key":"ref32","article-title":"Count-based exploration with the successor representation","author":"machado","year":"2018","journal-title":"arXiv preprint arXiv 1807 11622"},{"key":"ref31","article-title":"Self-attention generative adversarial networks","volume":"abs 1805 8318","author":"zhang","year":"2018","journal-title":"CoRR"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref36","article-title":"Image transformer","author":"parmar","year":"2018","journal-title":"arXiv preprint arxiv 1802 05807"},{"key":"ref35","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1244"},{"key":"ref10","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref11","article-title":"Noisy networks for exploration","author":"fortunato","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref12","article-title":"Deep exploration via randomized value functions","volume":"abs 1703 7608","author":"osband","year":"2017","journal-title":"CoRR"},{"key":"ref13","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref14","first-page":"206","article-title":"Exploration in model-based reinforcement learning by empirically estimating learning progress","author":"lopes","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref15","first-page":"2125","article-title":"Variational information maximisation for intrinsically motivated reinforcement learning","author":"mohamed","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref16","article-title":"Incentivizing exploration in reinforcement learning with deep predictive models","volume":"abs 1507 814","author":"stadie","year":"2015","journal-title":"CoRR"},{"key":"ref17","first-page":"2753","article-title":"Exploration: A study of count-based exploration for deep reinforcement learning","author":"tang","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref18","first-page":"1109","article-title":"VIME: Variational information maximizing exploration","author":"houthooft","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref28","first-page":"222","article-title":"A possibility for implementing curiosity and boredom in model-building neural controllers","author":"schmidhuber","year":"1991","journal-title":"Proc Int Conf Simulat Adapt Behav Animals Animats"},{"key":"ref4","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref27","article-title":"A distributional perspective on reinforcement learning","author":"bellemare","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref3","article-title":"Playing Atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"Proc NIPS DEEP Learn Workshop"},{"key":"ref6","article-title":"Distributed adaptive tracking control for Lur&#x2019;e systems with event-triggered strategy","author":"wu","year":"0","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"key":"ref29","first-page":"7251","article-title":"UnFlow: Unsupervised learning of optical flow with a bidirectional census loss","author":"meister","year":"2018","journal-title":"Proc 32nd AAAI Conf Artif Intell"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2018.2871196"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TFUZZ.2018.2883374"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2019.2918142"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TFUZZ.2019.2895560"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref20","article-title":"Large-scale study of curiosity-driven learning","author":"burda","year":"2019","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref22","article-title":"Exploration by random network distillation","author":"burda","year":"2019","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref21","article-title":"Never forget: Balancing exploration and exploitation via learning optical flow","author":"yang","year":"2019","journal-title":"arXiv preprint arXiv 1901 08486"},{"key":"ref24","article-title":"QANet: Combining local convolution with global self-attention for reading comprehension","author":"yu","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-2074"},{"key":"ref26","article-title":"Count-based exploration with neural density models","author":"ostrovski","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref25","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"}],"container-title":["IEEE Transactions on Systems, Man, and Cybernetics: Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221021\/9515329\/08936518.pdf?arnumber=8936518","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:53:11Z","timestamp":1652194391000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8936518\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9]]},"references-count":36,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tsmc.2019.2957051","relation":{},"ISSN":["2168-2216","2168-2232"],"issn-type":[{"value":"2168-2216","type":"print"},{"value":"2168-2232","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,9]]}}}