{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T14:31:43Z","timestamp":1730298703676,"version":"3.28.0"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,5]]},"DOI":"10.1109\/ssci50451.2021.9659949","type":"proceedings-article","created":{"date-parts":[[2022,1,24]],"date-time":"2022-01-24T21:09:51Z","timestamp":1643058591000},"page":"01-08","source":"Crossref","is-referenced-by-count":1,"title":["Effects of Different Optimization Formulations in Evolutionary Reinforcement Learning on Diverse Behavior Generation"],"prefix":"10.1109","author":[{"given":"Victor","family":"Villin","sequence":"first","affiliation":[]},{"given":"Naoki","family":"Masuyama","sequence":"additional","affiliation":[]},{"given":"Yusuke","family":"Nojima","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1162\/EVCO_a_00025"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.3389\/frobt.2016.00040"},{"journal-title":"Surprise search for evolutionary divergence","year":"2017","author":"gravina","key":"ref12"},{"key":"ref13","first-page":"5032","article-title":"Improving exploration in 
evolution strategies for deep reinforcement learning via a population of novelty-seeking agents","author":"conti","year":"2018","journal-title":"Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS'18)"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2018.2877215"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00077"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-25489-5_50"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860408"},{"key":"ref18","doi-asserted-by":"crossref","first-page":"292","DOI":"10.1007\/BFb0056872","article-title":"Multiobjective optimization using evolutionary algorithms — a comparative case study","author":"zitzler","year":"1998","journal-title":"Parallel Problem Solving from Nature — PPSN V"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CEC.2008.4631121"},{"key":"ref4","first-page":"10510","article-title":"Diversity-driven exploration strategy for deep reinforcement learning","author":"hong","year":"2018","journal-title":"Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS'18)"},{"journal-title":"Deep neuroevolution: Genetic algorithms are a competitive alternative for training deep neural networks for reinforcement learning","year":"2018","author":"such","key":"ref3"},{"key":"ref6","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","author":"ng","year":"0","journal-title":"Proceedings of the Sixteenth International Conference on Machine Learning"},{"key":"ref5","article-title":"Exploration by random network distillation","author":"burda","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1126\/science.aau6249"},{"key":"ref7","article-title":"Reinforcement learning with unsupervised auxiliary 
tasks","author":"jaderberg","year":"2017","journal-title":"Proceedings of the 5th International Conference on Learning Representations"},{"journal-title":"Population based training of neural networks","year":"2017","author":"jaderberg","key":"ref2"},{"key":"ref1","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume":"48","author":"mnih","year":"2016","journal-title":"Proceedings of The 33rd International Conference on Machine Learning ser Proceedings of Machine Learning Research"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/466"},{"key":"ref20","first-page":"18050","article-title":"Effective diversity in population based reinforcement learning","volume":"33","author":"parker-holder","year":"2020","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2021 IEEE Symposium Series on Computational Intelligence (SSCI)","start":{"date-parts":[[2021,12,5]]},"location":"Orlando, FL, USA","end":{"date-parts":[[2021,12,7]]}},"container-title":["2021 IEEE Symposium Series on Computational Intelligence (SSCI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9659537\/9659538\/09659949.pdf?arnumber=9659949","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:56:45Z","timestamp":1652201805000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9659949\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,5]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/ssci50451.2021.9659949","relation":{},"subject":[],"published":{"date-parts":[[2021,12,5]]}}}