{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,24]],"date-time":"2026-01-24T13:42:33Z","timestamp":1769262153028,"version":"3.49.0"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Emerg. Top. Comput. Intell."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tetci.2025.3593841","type":"journal-article","created":{"date-parts":[[2025,8,27]],"date-time":"2025-08-27T18:33:05Z","timestamp":1756319585000},"page":"594-606","source":"Crossref","is-referenced-by-count":1,"title":["From Wasserstein to Maximum Mean Discrepancy Barycenters: A Novel Framework for Uncertainty Propagation in Model-Free Reinforcement Learning"],"prefix":"10.1109","volume":"10","author":[{"given":"Srinjoy","family":"Roy","sequence":"first","affiliation":[{"name":"Chennai Mathematical Institute, Siruseri, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9955-2176","authenticated-orcid":false,"given":"Subhajit","family":"Saha","sequence":"additional","affiliation":[{"name":"Institute for Advancing Intelligence, TCG-CREST, Kolkata, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6843-4508","authenticated-orcid":false,"given":"Swagatam","family":"Das","sequence":"additional","affiliation":[{"name":"Electronics and Communication Sciences Unit, Indian Statistical Institute, Kolkata, India"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref2","first-page":"3003","article-title":"(More) efficient reinforcement learning via posterior sampling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"26","author":"Osband","year":"2013"},{"key":"ref3","first-page":"761","article-title":"Bayesian Q-learning","volume-title":"Proc. 15th Nat.\/10th Conf. Artif. Intell.\/Innov. Appl. Artif. Intell.","author":"Dearden","year":"1998"},{"key":"ref4","first-page":"19","article-title":"A Bayesian sampling approach to exploration in reinforcement learning","volume-title":"Proc. Conf. Uncertainty Artif. Intell.","author":"Asmuth","year":"2009"},{"key":"ref5","first-page":"4333","article-title":"Propagating uncertainty in reinforcement learning via Wasserstein barycenters","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Metelli","year":"2019"},{"key":"ref6","first-page":"1","article-title":"Controlling Wasserstein distances by kernel norms with application to compressive statistical learning","volume":"24","author":"Vayer","year":"2023","journal-title":"J. Mach. Learn. Res."},{"key":"ref7","doi-asserted-by":"crossref","DOI":"10.7551\/mitpress\/14207.001.0001","volume-title":"Distributional Reinforcement Learning","author":"Bellemare","year":"2023"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.jcss.2007.08.009"},{"key":"ref9","first-page":"4033","article-title":"Deep exploration via bootstrapped DQN","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Osband","year":"2016"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143955"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017984413808"},{"key":"ref12","first-page":"213","article-title":"R-max - A general polynomial time algorithm for near-optimal reinforcement learning","volume":"3","author":"Brafman","year":"2003","journal-title":"J. Mach. Learn. Res."},{"key":"ref13","first-page":"2377","article-title":"Generalization and exploration via randomized value functions","volume-title":"Proc. 33rd Int. Conf. Int. Conf. Mach. Learn.","volume":"48","author":"Osband","year":"2016"},{"key":"ref14","first-page":"3898","article-title":"Improving PAC exploration using the median of means","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Pazis","year":"2016"},{"key":"ref15","first-page":"4868","article-title":"Is Q-learning provably efficient?","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Jin","year":"2018"},{"issue":"51","key":"ref16","first-page":"1563","article-title":"Near-optimal regret bounds for reinforcement learning","volume":"11","author":"Jaksch","year":"2010","journal-title":"J. Mach. Learn. Res."},{"key":"ref17","first-page":"1505","article-title":"Optimistic linear programming gives logarithmic regret for irreducible MDPs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Tewari","year":"2007"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/7503.003.0011"},{"key":"ref19","first-page":"1578","article-title":"Efficient bias-span-constrained exploration-exploitation in reinforcement learning","volume-title":"Int. Conf. Mach. Learn.","author":"Fruit","year":"2018"},{"key":"ref20","first-page":"35","article-title":"REGAL: A regularization based algorithm for reinforcement learning in weakly communicating MDPs","volume-title":"Proc. 25th Conf. Uncertainty Artif. Intell.","author":"Bartlett","year":"2009"},{"key":"ref21","first-page":"263","article-title":"Minimax regret bounds for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Azar","year":"2017"},{"key":"ref22","first-page":"2818","article-title":"Sample complexity of episodic fixed-horizon reinforcement learning","volume-title":"Proc. 29th Int. Conf. Neural Inf. Process. Syst.","author":"Dann","year":"2015"},{"key":"ref23","first-page":"1031","article-title":"Model-based reinforcement learning with nearly tight exploration complexity bounds","volume-title":"Proc. 27th Int. Conf. Int. Conf. Mach. Learn.","author":"Szita","year":"2010"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-34106-9_26"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-94-015-3711-7"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1023\/A:1013689704352"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.2307\/2332286"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ALLERTON.2010.5706896"},{"key":"ref29","first-page":"2701","article-title":"Why is posterior sampling better than optimism for reinforcement learning?","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Osband","year":"2016"},{"key":"ref30","first-page":"1184","article-title":"Optimistic posterior sampling for reinforcement learning: Worst-case regret bounds","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Agrawal","year":"2022"},{"key":"ref31","first-page":"8626","article-title":"Randomized prior functions for deep reinforcement learning","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Osband","year":"2018"},{"key":"ref32","first-page":"2795","article-title":"Epistemic neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Osband","year":"2021"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ITA.2018.8503252"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2022.3140380"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1002\/SERIES1345"},{"key":"ref36","article-title":"Estimating barycenters of measures in high dimensions","author":"Cohen","year":"2020"},{"issue":"25","key":"ref37","first-page":"723","article-title":"A kernel two-sample test","volume":"13","author":"Gretton","year":"2012","journal-title":"J. Mach. Learn. Res."},{"key":"ref38","first-page":"397","article-title":"Using confidence bounds for exploitation-exploration trade-offs","volume":"3","author":"Auer","year":"2003","journal-title":"J. Mach. Learn. Res."},{"issue":"84","key":"ref39","first-page":"2413","article-title":"Reinforcement learning in finite MDPS: PAC analysis","volume-title":"J. Mach. Learn. Res.","volume":"10","author":"Strehl","year":"2009"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.3150\/18-BEJ1065"},{"key":"ref41","article-title":"Statistical inference for generative models with maximum mean discrepancy","author":"Briol","year":"2019"},{"issue":"131","key":"ref42","first-page":"1","article-title":"MushroomRL: Simplifying reinforcement learning research","volume":"22","author":"D\u2019Eramo","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref43","first-page":"2613","article-title":"Double Q-learning","volume-title":"Proc. 24th Annu. Conf. Neural Inf. Process. Syst.","author":"Hasselt","year":"2010"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1613\/jair.5699"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref47","article-title":"Playing atari with deep reinforcement learning","author":"Mnih","year":"2013"},{"key":"ref48","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang","year":"2016"},{"key":"ref49","first-page":"4294","article-title":"Learning values across many orders of magnitude","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Hasselt","year":"2016"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2022.3222545"}],"container-title":["IEEE Transactions on Emerging Topics in Computational Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7433297\/11361307\/11142813.pdf?arnumber=11142813","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T21:01:07Z","timestamp":1769202067000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11142813\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":50,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tetci.2025.3593841","relation":{},"ISSN":["2471-285X"],"issn-type":[{"value":"2471-285X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}