{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T03:17:45Z","timestamp":1761621465030,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":33,"publisher":"ACM","license":[{"start":{"date-parts":[[2019,7,25]],"date-time":"2019-07-25T00:00:00Z","timestamp":1564012800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"German Federal Ministry of Education and Research","award":["01IS17042"],"award-info":[{"award-number":["01IS17042"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2019,7,25]]},"DOI":"10.1145\/3292500.3330862","type":"proceedings-article","created":{"date-parts":[[2019,7,26]],"date-time":"2019-07-26T13:17:26Z","timestamp":1564147046000},"page":"1449-1459","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":13,"title":["Scaling Multi-Armed Bandit Algorithms"],"prefix":"10.1145","author":[{"given":"Edouard","family":"Fouch\u00e9","sequence":"first","affiliation":[{"name":"Karlsruhe Institute of Technology, Karlsruhe, Germany"}]},{"given":"Junpei","family":"Komiyama","sequence":"additional","affiliation":[{"name":"University of Tokyo, Tokyo, Japan"}]},{"given":"Klemens","family":"B\u00f6hm","sequence":"additional","affiliation":[{"name":"Karlsruhe Institute of Technology, Karlsruhe, Germany"}]}],"member":"320","published-online":{"date-parts":[[2019,7,25]]},"reference":[{"volume-title":"St\u00e9phan Cl\u00e9 mencc on, and Aur\u00e9 lien Garivier","year":"2018","author":"Achab Mastane","key":"e_1_3_2_1_1_1"},{"key":"e_1_3_2_1_2_1","volume-title":"Further Optimal Regret Bounds for Thompson Sampling. In AISTATS (JMLR Workshop and Conference Proceedings)","volume":"31","author":"Agrawal Shipra","year":"2013"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1987.1104491"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1013689704352"},{"volume-title":"Proceedings of IEEE 36th Annual Foundations of Computer Science. 322--331","author":"Auer Peter","key":"e_1_3_2_1_5_1"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1137\/S0097539701398375"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/1007352.1007367"},{"volume-title":"Learning from Time-Changing Data with Adaptive Windowing","author":"Bifet Albert","key":"e_1_3_2_1_8_1"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1561\/2200000024"},{"volume-title":"A Survey of Online Experiment Design with the Stochastic Multi-Armed Bandit. CoRR","year":"2015","author":"Burtini Giuseppe","key":"e_1_3_2_1_10_1"},{"key":"e_1_3_2_1_11_1","unstructured":"Deepayan Chakrabarti Ravi Kumar Filip Radlinski and Eli Upfal. 2008. Mortal Multi-Armed Bandits. In NIPS. 273--280.   Deepayan Chakrabarti Ravi Kumar Filip Radlinski and Eli Upfal. 2008. Mortal Multi-Armed Bandits. In NIPS. 273--280."},{"key":"e_1_3_2_1_12_1","unstructured":"Wei Chen Wei Hu Fu Li Jian Li Yu Liu and Pinyan Lu. 2016. Combinatorial Multi-Armed Bandit with General Reward Functions. In NIPS. 1651--1659.   Wei Chen Wei Hu Fu Li Jian Li Yu Liu and Pinyan Lu. 2016. Combinatorial Multi-Armed Bandit with General Reward Functions. In NIPS. 1651--1659."},{"volume-title":"ICML (JMLR Workshop and Conference Proceedings). JMLR.org, 1587--1595","year":"2016","author":"Degenne R\u00e9my","key":"e_1_3_2_1_13_1"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/2523813"},{"key":"e_1_3_2_1_15_1","volume-title":"The KL-UCB Algorithm for Bounded Stochastic Bandits and Beyond. In COLT (JMLR Proceedings)","volume":"19","author":"Garivier Aur\u00e9","year":"2011"},{"volume-title":"On Upper-Confidence Bound Policies for Non-Stationary Bandit Problems. CoRR","year":"2008","author":"Garivier Aur\u00e9","key":"e_1_3_2_1_16_1"},{"volume-title":"ALT (Lecture Notes in Computer Science)","author":"Garivier Aur\u00e9lien","key":"e_1_3_2_1_17_1"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-34106-9_18"},{"volume-title":"Anytime algorithms for multi-armed bandit problems","author":"Kleinberg Robert D.","key":"e_1_3_2_1_19_1","doi-asserted-by":"crossref","DOI":"10.1145\/1109557.1109659"},{"key":"e_1_3_2_1_20_1","volume-title":"Optimal Regret Analysis of Thompson Sampling in Stochastic Multi-armed Bandit Problem with Multiple Plays. In ICML (JMLR Workshop and Conference Proceedings)","volume":"37","author":"Komiyama Junpei","year":"2015"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.69.066138"},{"volume-title":"Bandit Algorithms","author":"Lattimore Tor","key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","DOI":"10.1017\/9781108571401"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/1772690.1772758"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3187009.3177737"},{"key":"e_1_3_2_1_25_1","volume-title":"ALT (Proceedings of Machine Learning Research)","volume":"76","author":"Maillard Odalric-Ambrym","year":"2017"},{"volume-title":"Taming Non-stationary Bandits: A Bayesian Approach. CoRR","year":"2017","author":"Raj Vishnu","key":"e_1_3_2_1_26_1"},{"volume-title":"Adapting to a Changing Environment: the Brownian Restless Bandits","author":"Slivkins Aleksandrs","key":"e_1_3_2_1_27_1"},{"volume-title":"Surveillance in an abruptly changing world via multiarmed bandits","author":"Srivastava Vaibhav","key":"e_1_3_2_1_28_1","doi-asserted-by":"crossref","DOI":"10.1109\/CDC.2014.7039462"},{"volume-title":"Barto","year":"1998","author":"Sutton Richard S.","key":"e_1_3_2_1_29_1"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1093\/biomet\/25.3-4.285"},{"volume-title":"Alex Rogers, and Nicholas R. Jennings.","year":"2010","author":"Tran-Thanh Long","key":"e_1_3_2_1_31_1"},{"volume-title":"ALT","author":"Uchiya Taishi","key":"e_1_3_2_1_32_1"},{"volume-title":"Budgeted Multi-Armed Bandits with Multiple Plays","author":"Xia Yingce","key":"e_1_3_2_1_33_1"}],"event":{"name":"KDD '19: The 25th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Anchorage AK USA","acronym":"KDD '19"},"container-title":["Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery &amp; Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3292500.3330862","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3292500.3330862","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T00:26:02Z","timestamp":1750206362000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3292500.3330862"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,7,25]]},"references-count":33,"alternative-id":["10.1145\/3292500.3330862","10.1145\/3292500"],"URL":"https:\/\/doi.org\/10.1145\/3292500.3330862","relation":{},"subject":[],"published":{"date-parts":[[2019,7,25]]},"assertion":[{"value":"2019-07-25","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}