{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T12:44:57Z","timestamp":1777639497505,"version":"3.51.4"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s10489-024-05965-2","type":"journal-article","created":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T09:46:10Z","timestamp":1732614370000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["Deep reinforcement learning for dynamic strategy interchange in financial markets"],"prefix":"10.1007","volume":"55","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0602-2115","authenticated-orcid":false,"given":"Xingyu","family":"Zhong","sequence":"first","affiliation":[]},{"given":"Jinhui","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Siyuan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Qingzhen","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,26]]},"reference":[{"key":"5965_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3625234","volume":"14","author":"S Sun","year":"2023","unstructured":"Sun S, Wang R, An B (2023) Reinforcement learning for quantitative trading. ACM Trans Intell Syst Technol 14:1\u201329","journal-title":"ACM Trans Intell Syst Technol"},{"key":"5965_CR2","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1080\/00031305.2017.1380080","volume":"72","author":"SJ Taylor","year":"2018","unstructured":"Taylor SJ, Letham B (2018) Forecasting at scale. American Stat 72:37\u201345","journal-title":"American Stat"},{"key":"5965_CR3","doi-asserted-by":"publisher","first-page":"2222","DOI":"10.1109\/TNNLS.2016.2582924","volume":"28","author":"K Greff","year":"2016","unstructured":"Greff K, Srivastava RK, Koutn\u00edk J, Steunebrink BR, Schmidhuber J (2016) Lstm: A search space odyssey. IEEE Trans Neural Netw Learn Syst 28:2222\u20132232","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"5965_CR4","doi-asserted-by":"crossref","unstructured":"Yang H, Liu X-Y, Zhong S, Walid A (2020) Deep reinforcement learning for automated stock trading: An ensemble strategy 1\u20138","DOI":"10.1145\/3383455.3422540"},{"key":"5965_CR5","doi-asserted-by":"crossref","unstructured":"Guan M, Liu X-Y (2021) Explainable deep reinforcement learning for portfolio management: an empirical approach 1\u20139","DOI":"10.1145\/3490354.3494415"},{"key":"5965_CR6","doi-asserted-by":"crossref","unstructured":"Niu H, Li S, Li J (2022) Metatrader: An reinforcement learning approach integrating diverse policies for portfolio optimization 1573\u20131583","DOI":"10.1145\/3511808.3557363"},{"key":"5965_CR7","doi-asserted-by":"crossref","unstructured":"Faber M (2007) A quantitative approach to tactical asset allocation. The Journal of Wealth Management, Spring","DOI":"10.3905\/jwm.2007.674809"},{"key":"5965_CR8","unstructured":"Osler CL (2000) Support for resistance: technical analysis and intraday exchange rates. Econ Policy Rev 6"},{"key":"5965_CR9","doi-asserted-by":"publisher","first-page":"18797","DOI":"10.1109\/ACCESS.2020.2968595","volume":"8","author":"Y-H Xu","year":"2020","unstructured":"Xu Y-H, Yang C-C, Hua M, Zhou W (2020) Deep deterministic policy gradient (ddpg)-based resource allocation scheme for noma vehicular communications. IEEE Access 8:18797\u201318807","journal-title":"IEEE Access"},{"key":"5965_CR10","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0172395","volume":"12","author":"A Tampuu","year":"2017","unstructured":"Tampuu A et al (2017) Multiagent cooperation and competition with deep reinforcement learning. PloS one 12:e0172395","journal-title":"PloS one"},{"key":"5965_CR11","doi-asserted-by":"crossref","unstructured":"Shoham Y, Leyton-Brown K (2008) Multiagent systems: Algorithmic, game-theoretic, and logical foundations (Cambridge University Press)","DOI":"10.1017\/CBO9780511811654"},{"key":"5965_CR12","unstructured":"Lowe R, et\u00a0al (2017) Multi-agent actor-critic for mixed cooperative-competitive environments. Adv Neural Inf Process Syst 30"},{"key":"5965_CR13","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1007\/BF02936328","volume":"2","author":"A Migdalas","year":"2002","unstructured":"Migdalas A (2002) Applications of game theory in finance and managerial accounting. Operat Res 2:209\u2013241","journal-title":"Operat Res"},{"key":"5965_CR14","doi-asserted-by":"publisher","first-page":"11055","DOI":"10.1007\/s11063-023-11364-4","volume":"55","author":"T Radhika","year":"2023","unstructured":"Radhika T, Chandrasekar A, Vijayakumar V, Zhu Q (2023) Analysis of markovian jump stochastic cohen-grossberg bam neural networks with time delays for exponential input-to-state stability. Neural Process Lett 55:11055\u201311072","journal-title":"Neural Process Lett"},{"key":"5965_CR15","doi-asserted-by":"publisher","first-page":"105127","DOI":"10.1016\/j.jet.2020.105127","volume":"191","author":"S Rossi","year":"2021","unstructured":"Rossi S, Tinn K (2021) Rational quantitative trading in efficient markets. J Econ Theory 191:105127","journal-title":"J Econ Theory"},{"key":"5965_CR16","unstructured":"Shen S, Jiang H, Zhang T (2012) Stock market forecasting using machine learning algorithms. Department of Electrical Engineering, Stanford University, Stanford, CA 1\u20135"},{"key":"5965_CR17","unstructured":"Krollner B, Vanstone BJ, Finnie GR, et\u00a0al (2010) Financial time series forecasting with machine learning techniques: a survey"},{"key":"5965_CR18","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1016\/j.jfds.2016.03.002","volume":"2","author":"R Dash","year":"2016","unstructured":"Dash R, Dash PK (2016) A hybrid stock trading framework integrating technical analysis with machine learning techniques. J Finance Data Sci 2:42\u201357","journal-title":"J Finance Data Sci"},{"key":"5965_CR19","doi-asserted-by":"crossref","unstructured":"Tay FE, Cao L (2001) Application of support vector machines in financial time series forecasting. Omega 29:309\u2013317","DOI":"10.1016\/S0305-0483(01)00026-3"},{"key":"5965_CR20","doi-asserted-by":"crossref","unstructured":"Tsantekidis A et al (2017) Forecasting stock prices from the limit order book using convolutional neural networks 1:7\u201312","DOI":"10.1109\/CBI.2017.23"},{"key":"5965_CR21","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s40537-020-00333-6","volume":"7","author":"J Shen","year":"2020","unstructured":"Shen J, Shafiq MO (2020) Short-term stock market price trend prediction using a comprehensive deep learning system. J Big Data 7:1\u201333","journal-title":"J Big Data"},{"key":"5965_CR22","doi-asserted-by":"crossref","unstructured":"Mnih V, et\u00a0al (2015) Human-level control through deep reinforcement learning. Nature 518:529\u2013533","DOI":"10.1038\/nature14236"},{"key":"5965_CR23","doi-asserted-by":"publisher","first-page":"1217","DOI":"10.1109\/TNNLS.2020.2981377","volume":"32","author":"L Li","year":"2020","unstructured":"Li L, Li D, Song T, Xu X (2020) Actor-critic learning control with regularization and feature selection in policy gradient estimation. IEEE Trans Neural Netw Learn Syst 32:1217\u20131227","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"5965_CR24","doi-asserted-by":"publisher","first-page":"1324","DOI":"10.1109\/TETCI.2022.3140375","volume":"6","author":"C Banerjee","year":"2022","unstructured":"Banerjee C, Chen Z, Noman N, Zamani M (2022) Optimal actor-critic policy with optimized training datasets. IEEE Trans Emerg Topics Computat Intell 6:1324\u20131334","journal-title":"IEEE Trans Emerg Topics Computat Intell"},{"key":"5965_CR25","doi-asserted-by":"publisher","first-page":"174","DOI":"10.1016\/j.matcom.2023.08.007","volume":"222","author":"Y Cao","year":"2024","unstructured":"Cao Y, Chandrasekar A, Radhika T, Vijayakumar V (2024) Input-to-state stability of stochastic markovian jump genetic regulatory networks. Math Comput Simulation 222:174\u2013187","journal-title":"Math Comput Simulation"},{"key":"5965_CR26","doi-asserted-by":"publisher","unstructured":"Lee J, Kim R, Yi S-W, Kang J (2020) Maps: Multi-agent reinforcement learning-based portfolio management system. https:\/\/doi.org\/10.24963\/ijcai.2020\/623","DOI":"10.24963\/ijcai.2020\/623"},{"key":"5965_CR27","doi-asserted-by":"crossref","unstructured":"Huang Z, Tanaka F (2021) Mspm: A modularized and scalable multi-agent reinforcement learning-based system for financial portfolio management","DOI":"10.1371\/journal.pone.0265924"},{"key":"5965_CR28","volume-title":"Dynamic Programming and Markov Processes","author":"RA Howard","year":"1960","unstructured":"Howard RA (1960) Dynamic Programming and Markov Processes. MIT Press, Cambridge, MA"},{"key":"5965_CR29","first-page":"1835","volume":"35","author":"X-Y Liu","year":"2022","unstructured":"Liu X-Y et al (2022) Finrl-meta: Market environments and benchmarks for data-driven financial reinforcement learning. Adv Neural Inf Process Syst 35:1835\u20131849","journal-title":"Adv Neural Inf Process Syst"},{"key":"5965_CR30","unstructured":"Brockman G, et\u00a0al (2016) Openai gym. arXiv:1606.01540"},{"key":"5965_CR31","first-page":"12348","volume":"22","author":"A Raffin","year":"2021","unstructured":"Raffin A et al (2021) Stable-baselines3: Reliable reinforcement learning implementations. J Mach Learn Res 22:12348\u201312355","journal-title":"J Mach Learn Res"},{"key":"5965_CR32","unstructured":"Liang E, et\u00a0al (2018) Rllib: Abstractions for distributed reinforcement learning 3053\u20133062"},{"key":"5965_CR33","unstructured":"Liu X-Y, Li Z, Wang Z, Zheng J (2021) Elegantrl: A lightweight and stable deep reinforcement learning library"},{"key":"5965_CR34","doi-asserted-by":"publisher","first-page":"119191","DOI":"10.1016\/j.eswa.2022.119191","volume":"213","author":"C Wu","year":"2023","unstructured":"Wu C, Bi W, Liu H (2023) Proximal policy optimization algorithm for dynamic pricing with online reviews. Expert Syst Appl 213:119191","journal-title":"Expert Syst Appl"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05965-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-024-05965-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05965-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,2]],"date-time":"2025-01-02T15:09:32Z","timestamp":1735830572000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-024-05965-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,26]]},"references-count":34,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["5965"],"URL":"https:\/\/doi.org\/10.1007\/s10489-024-05965-2","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,26]]},"assertion":[{"value":"6 October 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 November 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"30"}}