{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T12:23:20Z","timestamp":1730204600780,"version":"3.28.0"},"reference-count":38,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,14]]},"DOI":"10.1109\/cdc45484.2021.9683030","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T15:50:18Z","timestamp":1643730618000},"page":"720-727","source":"Crossref","is-referenced-by-count":2,"title":["Thompson sampling for linear quadratic mean-field teams"],"prefix":"10.1109","author":[{"given":"Mukul","family":"Gagrani","sequence":"first","affiliation":[{"name":"USC,Qualcomm AI Research,Department of EE,San Diego"}]},{"given":"Sagar","family":"Sudhakara","sequence":"additional","affiliation":[{"name":"University of Southern California,Department of Electrical Engineering,Los Angeles,CA"}]},{"given":"Aditya","family":"Mahajan","sequence":"additional","affiliation":[{"name":"Mcgill University,Canada"}]},{"given":"Ashutosh","family":"Nayyar","sequence":"additional","affiliation":[{"name":"University of Southern California,Department of Electrical Engineering,Los Angeles,CA"}]},{"given":"Yi","family":"Ouyang","sequence":"additional","affiliation":[{"name":"Preferred Networks,USA"}]}],"member":"263","reference":[{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1977.1101497"},{"key":"ref33","first-page":"4966","article-title":"Learning mean-field games","author":"guo","year":"2019","journal-title":"Neural Information Processing Systems"},{"article-title":"Reinforcement learning for mean field game","year":"2019","author":"tiwari","key":"ref32"},{"key":"ref31","first-page":"251","article-title":"Reinforcement learning in stationary mean-field games","author":"subramanian","year":"2019","journal-title":"International Conference on Autonomous Agents and Multi-Agent Systems"},{"key":"ref30","first-page":"5567","article-title":"Mean field multi-agent reinforcement learning","author":"yang","year":"2018","journal-title":"International Conference on Machine Learning"},{"article-title":"Thompson sampling for linear quadratic mean-field teams","year":"2020","author":"gagrani","key":"ref37"},{"article-title":"Unified reinforcement Q-learning for mean field game and control problems","year":"2020","author":"angiuli","key":"ref36"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CDC42340.2020.9304279"},{"article-title":"Multi type mean field reinforcement learning","year":"2020","author":"subramanian","key":"ref34"},{"article-title":"Certainty equivalent control of LQR is efficient","year":"2019","author":"mania","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2020.108950"},{"key":"ref12","first-page":"8937","article-title":"Naive exploration is optimal for online lqr","author":"simchowitz","year":"2020","journal-title":"International Conference on Machine Learning"},{"key":"ref13","article-title":"Analysis of thompson sampling for the multi-armed bandit problem","author":"agrawal","year":"2012","journal-title":"Conference on Learning Theory"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ALLERTON.2017.8262873"},{"key":"ref15","article-title":"Posterior sampling-based reinforcement learning for control of unknown linear systems","author":"ouyang","year":"2019","journal-title":"IEEE Transactions on Automatic Control"},{"key":"ref16","first-page":"1","article-title":"Improved regret bounds for thompson sampling in linear quadratic control problems","author":"abeille","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref17","first-page":"1328","article-title":"Logarithmic regret for learning linear quadratic regulators efficiently","author":"cassel","year":"2020","journal-title":"International Conference on Machine Learning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1080\/00207178608933690"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/0005-1098(91)90086-H"},{"key":"ref28","doi-asserted-by":"crossref","first-page":"1375","DOI":"10.3982\/ECTA6158","article-title":"Markov perfect industry dynamics with many firms","volume":"76","author":"weintraub","year":"2008","journal-title":"Econometrica"},{"article-title":"Finite time analysis of optimal adaptive policies for linear-quadratic systems","year":"2017","author":"faradonbeh","key":"ref4"},{"key":"ref27","first-page":"1489","article-title":"Oblivious Equilibrium: A Mean Field Approximation for Large-Scale Dynamic Games","author":"weintraub","year":"2005","journal-title":"Neural Information Processing Systems"},{"key":"ref3","first-page":"1","article-title":"Regret bounds for the adaptive control of linear quadratic systems","author":"abbasi-yadkori","year":"2011","journal-title":"Annual Conference on Learning Theory"},{"key":"ref6","first-page":"23","article-title":"Efficient optimistic exploration in linear-quadratic regulators via lagrangian relaxation","author":"abeille","year":"2020","journal-title":"International Conference on Machine Learning"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s13235-013-0099-2"},{"key":"ref5","first-page":"1300","article-title":"Learning linear-quadratic regulators efficiently with only $\\sqrt T $ regret","author":"cohen","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1137\/0319052"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1980.1102363"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012997317499"},{"key":"ref9","first-page":"4192","article-title":"Regret bounds for robust adaptive control of the linear quadratic regulator","author":"dean","year":"2018","journal-title":"Neural Information Processing Systems"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1013689704352"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/0005-1098(95)00013-M"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2015.7403050"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2011.07.004"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s11537-007-0657-8"},{"article-title":"Linear Quadratic Mean Field Teams: Optimal and Approximately Optimal Decentralized Solutions","year":"2016","author":"arabneydi","key":"ref23"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2012.2183439"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2007.904450"}],"event":{"name":"2021 60th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2021,12,14]]},"location":"Austin, TX, USA","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 60th IEEE Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9682670\/9682776\/09683030.pdf?arnumber=9683030","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T16:23:51Z","timestamp":1654532631000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9683030\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,14]]},"references-count":38,"URL":"https:\/\/doi.org\/10.1109\/cdc45484.2021.9683030","relation":{},"subject":[],"published":{"date-parts":[[2021,12,14]]}}}