{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T22:55:42Z","timestamp":1740178542783,"version":"3.37.3"},"reference-count":22,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Koret Foundation grant for Smart Cities and Digital Living"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Control Syst. Lett."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/lcsys.2024.3519637","type":"journal-article","created":{"date-parts":[[2024,12,18]],"date-time":"2024-12-18T19:52:16Z","timestamp":1734551536000},"page":"2985-2990","source":"Crossref","is-referenced-by-count":0,"title":["We Are Legion: High Probability Regret Bound in Adversarial Multiagent Online Learning"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-6855-9852","authenticated-orcid":false,"given":"Sri","family":"Jaladi","sequence":"first","affiliation":[{"name":"Department of Computer Science, Stanford University, Stanford, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4120-8292","authenticated-orcid":false,"given":"Ilai","family":"Bistritz","sequence":"additional","affiliation":[{"name":"Department of Industrial Engineering and the School of Electrical Engineering, Tel Aviv University, Tel Aviv, Israel"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.4086\/toc.2012.v008a006"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1561\/2200000068"},{"key":"ref3","first-page":"1074","article-title":"Improved highprobability regret for adversarial bandits with time-varying feedback graphs","volume-title":"Proc. Int. Conf. Algorithmic Learn. Theory","author":"Luo"},{"key":"ref4","first-page":"1","article-title":"Explore no more: Improved high-probability regret bounds for non-stochastic bandits","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Neu"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1006\/inco.1994.1009"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/TCNS.2016.2635380"},{"key":"ref7","first-page":"1","article-title":"Distributed multi-player bandits\u2014A game of thrones approach","volume-title":"Proc. 32nd Conf. Neural Inf. Process. Syst.","volume":"31","author":"Bistritz"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1109\/TAC.2021.3077454"},{"key":"ref9","first-page":"837","article-title":"Queue up your regrets: Achieving the dynamic capacity region of multiplayer bandits","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Bistritz"},{"issue":"77","key":"ref10","first-page":"1","article-title":"Multi-player bandits: The adversarial case","volume":"21","author":"Alatur","year":"2020","journal-title":"J. Mach. Learn. Res."},{"issue":"212","key":"ref11","first-page":"1","article-title":"Multi-agent multiarmed bandits with limited communication","volume":"23","author":"Agarwal","year":"2022","journal-title":"J. Mach. Learn. Res."},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/CDC.2016.7798264"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/CDC51059.2022.9992885"},{"key":"ref14","first-page":"1","article-title":"Decentralized cooperative stochastic bandits","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Mart\u00ednez-Rubio"},{"key":"ref15","first-page":"1","article-title":"Individual regret in cooperative nonstochastic multi-armed bandits","volume-title":"Proc. 33rd Conf. Neural Inf. Process. Syst.","author":"Bar-On"},{"key":"ref16","first-page":"1421","article-title":"Adversarially robust multi-armed bandit algorithm with variance-dependent regret bounds","volume-title":"Proc. 35th Conf. Learn. Theory","author":"Ito"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1007\/978-3-540-75225-7_15"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/ICASSP.2017.7952664"},{"key":"ref19","first-page":"1","article-title":"SIC-MMAB: Synchronisation involves communication in multiplayer multi-armed bandits","volume-title":"Proc. 33rd Conf. Neural Inf. Process. Syst.","author":"Boursier"},{"key":"ref20","first-page":"24005","article-title":"Fair algorithms for multi-agent multi-armed bandits","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Hossain"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1109\/JSAIT.2021.3073065"},{"key":"ref22","first-page":"1","article-title":"Online learning with adversarial delays","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Quanrud"}],"container-title":["IEEE Control Systems Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7782633\/10411713\/10806853.pdf?arnumber=10806853","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,31]],"date-time":"2024-12-31T06:33:50Z","timestamp":1735626830000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10806853\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/lcsys.2024.3519637","relation":{},"ISSN":["2475-1456"],"issn-type":[{"type":"electronic","value":"2475-1456"}],"subject":[],"published":{"date-parts":[[2024]]}}}