{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T19:23:23Z","timestamp":1740165803494,"version":"3.37.3"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100002460","name":"Chung-Ang University Research Grants in 2021","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002460","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003725","name":"Institute for Information & communication Technology Planning & evaluation (IITP) Grant funded by the Korean Government","doi-asserted-by":"publisher","award":["2022-0-00612"],"award-info":[{"award-number":["2022-0-00612"]}],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/tnnls.2022.3203035","type":"journal-article","created":{"date-parts":[[2022,9,14]],"date-time":"2022-09-14T19:31:32Z","timestamp":1663183892000},"page":"5280-5294","source":"Crossref","is-referenced-by-count":1,"title":["Minimax Optimal Bandits for Heavy Tail Rewards"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0147-2715","authenticated-orcid":false,"given":"Kyungjae","family":"Lee","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Chung-Ang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2684-2022","authenticated-orcid":false,"given":"Sungbin","family":"Lim","sequence":"additional","affiliation":[{"name":"Artificial Intelligence Graduate School and the Department of Industrial Engineering, Ulsan National Institute of Science and Technology (UNIST), Ulsan, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/0196-8858(85)90002-8"},{"key":"ref2","first-page":"1","article-title":"Minimax policies for adversarial and stochastic bandits","volume-title":"Proc. 22nd Conf. Learn. Theory","author":"Audibert"},{"key":"ref3","first-page":"99","article-title":"Further optimal regret bounds for Thompson sampling","volume-title":"Proc. 16th Int. Conf. Artif. Intell. Statist.","author":"Agrawal"},{"key":"ref4","first-page":"223","article-title":"A minimax and asymptotically optimal algorithm for stochastic bandits","volume-title":"Proc. Int. Conf. Algorithmic Learn. Theory","volume":"76","author":"M\u00e9nard"},{"key":"ref5","first-page":"2691","article-title":"On the optimality of perturbations in stochastic and adversarial multi-armed bandit problems","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref6","first-page":"5074","article-title":"MOTS: Minimax optimal Thompson sampling","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Jin"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2013.2277869"},{"key":"ref8","first-page":"8452","article-title":"Optimal algorithms for stochastic multi-armed bandits with heavy tailed rewards","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lee"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2020.3035767"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-04414-4_7"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2600243"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2818742"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2017.2697407"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2806006"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2885123"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2854796"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2995920"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2995920"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1023\/A:1013689704352"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1561\/2200000024"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2013.2263494"},{"key":"ref22","first-page":"26","article-title":"Regret minimization in heavy-tailed bandits","volume-title":"Proc. Conf. Learn. Theory","volume":"134","author":"Agrawal"},{"key":"ref23","first-page":"6284","article-title":"Boltzmann exploration done right","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Cesa-Bianchi"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1214\/11-AIHP454"},{"volume-title":"Cryptocurrency Historical Prices","year":"2021","author":"Rajkumar","key":"ref25"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/b978-0-444-50896-6.x5000-6"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.3390\/ijfs4040024"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10492491\/09893089.pdf?arnumber=9893089","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,11]],"date-time":"2024-12-11T01:45:51Z","timestamp":1733881551000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893089\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":27,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2022.3203035","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"type":"print","value":"2162-237X"},{"type":"electronic","value":"2162-2388"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}