{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T07:12:44Z","timestamp":1773731564978,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea (NRF) Grant","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100014188","name":"Ministry of Science and ICT, South Korea","doi-asserted-by":"publisher","award":["NRF-2021R1A2C2094350"],"award-info":[{"award-number":["NRF-2021R1A2C2094350"]}],"id":[{"id":"10.13039\/501100014188","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Institute of Information & Communications Technology Planning and Evaluation (IITP) Grant"},{"DOI":"10.13039\/501100003621","name":"Korea Government","doi-asserted-by":"publisher","award":["2020-0-01373"],"award-info":[{"award-number":["2020-0-01373"]}],"id":[{"id":"10.13039\/501100003621","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/access.2023.3324458","type":"journal-article","created":{"date-parts":[[2023,10,13]],"date-time":"2023-10-13T18:07:33Z","timestamp":1697220453000},"page":"112577-112589","source":"Crossref","is-referenced-by-count":17,"title":["Offline Reinforcement Learning for Automated Stock Trading"],"prefix":"10.1109","volume":"11","author":[{"given":"Namyeong","family":"Lee","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Hanyang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8877-9519","authenticated-orcid":false,"given":"Jun","family":"Moon","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Hanyang University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref13","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume":"34","author":"chen","year":"2021","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref35","article-title":"Practical deep reinforcement learning approach for stock trading","author":"liu","year":"2018","journal-title":"arXiv 1811 07522"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"lecun","year":"2015","journal-title":"Nature"},{"key":"ref34","article-title":"Optimistic bull or pessimistic bear: Adaptive deep reinforcement learning for stock portfolio allocation","author":"li","year":"2019","journal-title":"arXiv 1907 01503"},{"key":"ref15","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref14","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"radford","year":"2019","journal-title":"OpenAIRE blog"},{"key":"ref36","article-title":"Layer 
normalization","author":"ba","year":"2016","journal-title":"arXiv 1607 06450"},{"key":"ref31","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref30","first-page":"1273","article-title":"Offline reinforcement learning as one big sequence modeling problem","volume":"34","author":"janner","year":"2021","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IHMSC.2013.84"},{"key":"ref33","article-title":"Adversarial deep reinforcement learning in portfolio management","author":"liang","year":"2018","journal-title":"arXiv 1808 09940"},{"key":"ref10","article-title":"On-line portfolio selection with moving average reversion","author":"li","year":"2012","journal-title":"arXiv 1206 4626"},{"key":"ref32","article-title":"A deep reinforcement learning framework for the financial portfolio management problem","author":"jiang","year":"2017","journal-title":"arXiv 1706 10059"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i01.5462"},{"key":"ref1","article-title":"FinRL: A deep reinforcement learning library for automated stock trading in quantitative finance","author":"liu","year":"2020","journal-title":"arXiv 2011 09607"},{"key":"ref17","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume":"34","author":"fujimoto","year":"2021","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref39","author":"ang","year":"2012","journal-title":"Mean-variance investing"},{"key":"ref16","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"levine","year":"2020","journal-title":"arXiv 2005 01643"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1142\/WSHFES"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3387168.3387199"},{"key":"ref18","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume":"33","author":"kumar","year":"2020","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/icwsm.v8i1.14550"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.2469\/faj.v58.n4.2453"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1148"},{"key":"ref26","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"arXiv 1810 04805"},{"key":"ref25","first-page":"1","article-title":"Attention is all you need","volume":"30","author":"vaswani","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref20","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref42","doi-asserted-by":"crossref","first-page":"601","DOI":"10.1007\/s10846-018-0898-1","article-title":"A fully-autonomous aerial robot for search and rescue applications in indoor environments using learning-based techniques","volume":"95","author":"sampedro","year":"2019","journal-title":"J Intell Robotic Syst"},{"key":"ref41","article-title":"Deep reinforcement learning for list-wise recommendations","author":"zhao","year":"2017","journal-title":"arXiv 1801 
00209"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.2469\/faj.v66.n5.3"},{"key":"ref44","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref21","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 06347"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2019.2921159"},{"key":"ref28","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref27","article-title":"Improving language understanding by generative pre-training","author":"radford","year":"2018"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1174"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1134\/S1064226919120131"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467297"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330647"},{"key":"ref4","article-title":"DP-LSTM: Differential privacy-inspired LSTM for stock prediction using financial news","author":"li","year":"2019","journal-title":"arXiv 1912 10806"},{"key":"ref3","first-page":"1419","article-title":"Stock market&#x2019;s price movement prediction with LSTM neural networks","author":"nelson","year":"2017","journal-title":"Proc Int Joint Conf Neural Netw (IJCNN)"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/640"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-5105"},{"key":"ref40","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"fu","year":"2020","journal-title":"arXiv 2004 07219"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10005208\/10285085.pdf?arnumber=10285085","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,6]],"date-time":"2023-11-06T19:57:28Z","timestamp":1699300648000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10285085\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3324458","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}