{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T03:27:51Z","timestamp":1768706871369,"version":"3.49.0"},"reference-count":23,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"IIT Delhi through I-Hub Foundation for Cobotics","award":["GP\/2021\/ISSC\/022"],"award-info":[{"award-number":["GP\/2021\/ISSC\/022"]}]},{"DOI":"10.13039\/501100001843","name":"Science and Engineering Research Board","doi-asserted-by":"publisher","award":["CRG\/2022\/003707"],"award-info":[{"award-number":["CRG\/2022\/003707"]}],"id":[{"id":"10.13039\/501100001843","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Artif. Intell."],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1109\/tai.2024.3375258","type":"journal-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T16:32:13Z","timestamp":1710779533000},"page":"3624-3637","source":"Crossref","is-referenced-by-count":6,"title":["Online Reinforcement Learning in Periodic MDP"],"prefix":"10.1109","volume":"5","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-1598-2121","authenticated-orcid":false,"given":"Ayush","family":"Aniket","sequence":"first","affiliation":[{"name":"Department of Electrical Engineering, IIT Delhi, New Delhi, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2684-5912","authenticated-orcid":false,"given":"Arpan","family":"Chattopadhyay","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, IIT Delhi, New Delhi, India"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/GAMENETS.2009.5137416"},{"key":"ref3","article-title":"Idiosyncrasies and challenges of data driven learning in electronic trading","author":"Bacoyannis","year":"2018"},{"key":"ref4","first-page":"2","article-title":"Near-optimal regret bounds for reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"21","author":"Auer","year":"2008"},{"key":"ref5","article-title":"A sliding-window algorithm for Markov decision processes with arbitrarily changing rewards and transitions","author":"Gajane","year":"2018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2019.8815000"},{"key":"ref7","first-page":"81","article-title":"Variational regret bounds for reinforcement learning","volume-title":"Proc. Uncertainty Artif. Intell.","author":"Ortner","year":"2020"},{"key":"ref8","first-page":"1843","article-title":"Reinforcement learning for non-stationary Markov decision processes: The blessing of (more) optimism","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Cheung","year":"2020"},{"key":"ref9","first-page":"6743","article-title":"Dynamic regret of policy optimization in non-stationary environments","volume":"33","author":"Fei","year":"2020","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref10","first-page":"3538","article-title":"A kernel-based approach to non-stationary reinforcement learning in metric spaces","volume-title":"Proc. Int. Conf. Artif. Intell. Statist.","author":"Domingues","year":"2021"},{"key":"ref11","first-page":"7447","article-title":"Near-optimal model-free reinforcement learning in non-stationary episodic MDPs","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mao","year":"2021"},{"key":"ref12","article-title":"Nonstationary reinforcement learning with linear function approximation","author":"Zhou","year":"2020"},{"key":"ref13","article-title":"Efficient learning in non-stationary linear Markov decision processes","author":"Touati","year":"2020"},{"key":"ref14","first-page":"4300","article-title":"Non-stationary reinforcement learning without prior knowledge: An optimal black-box approach","volume-title":"Proc. Conf. Learn. Theory","author":"Wei","year":"2021"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1287\/opre.13.6.920"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/BF01720020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2014.7010627"},{"key":"ref18","volume-title":"Markov Decision Processes: Discrete Stochastic Dynamic Programming","author":"Puterman","year":"2014"},{"key":"ref19","article-title":"Exploration-exploitation dilemma in reinforcement learning under various form of prior knowledge","author":"Fruit","year":"2019"},{"key":"ref20","article-title":"Inequalities for the l1 deviation of the empirical distribution","author":"Weissman","year":"2003"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.tcs.2009.01.016"},{"key":"ref22","first-page":"1056","article-title":"Tightening exploration in upper confidence reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bourel","year":"2020"},{"key":"ref23","first-page":"3","article-title":"(More) efficient reinforcement learning via posterior sampling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"26","author":"Osband","year":"2013"}],"container-title":["IEEE Transactions on Artificial Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9078688\/10599850\/10475139.pdf?arnumber=10475139","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T01:09:12Z","timestamp":1755911352000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10475139\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7]]},"references-count":23,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tai.2024.3375258","relation":{},"ISSN":["2691-4581"],"issn-type":[{"value":"2691-4581","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,7]]}}}