{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T17:17:19Z","timestamp":1740158239991,"version":"3.37.3"},"reference-count":19,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2018,1,10]],"date-time":"2018-01-10T00:00:00Z","timestamp":1515542400000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. ITS Res."],"published-print":{"date-parts":[[2018,9]]},"DOI":"10.1007\/s13177-017-0150-6","type":"journal-article","created":{"date-parts":[[2018,1,9]],"date-time":"2018-01-09T21:58:12Z","timestamp":1515535092000},"page":"215-226","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Adaptive Learning Algorithms for Simulation-Based Dynamic Traffic User Equilibrium"],"prefix":"10.1007","volume":"16","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6453-8403","authenticated-orcid":false,"suffix":"Jr","given":"Genaro","family":"Peque","sequence":"first","affiliation":[]},{"given":"Toshihiko","family":"Miyagi","sequence":"additional","affiliation":[]},{"given":"Fumitaka","family":"Kurauchi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,1,10]]},"reference":[{"key":"150_CR1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32460-4","volume-title":"Traffic Flow Dynamics","author":"M Treiber","year":"2013","unstructured":"Treiber, M., Kesting, A.: Traffic Flow Dynamics. Springer-Verlag, Berlin, Heidelberg (2013)"},{"key":"150_CR2","doi-asserted-by":"publisher","first-page":"438","DOI":"10.1016\/j.sbspro.2012.09.762","volume":"54","author":"T Miyagi","year":"2012","unstructured":"Miyagi, T., Peque, G.: Informed user algorithm that converge to a pure Nash equilibrium in traffic games. Procedia Soc. Behav. Sci. 54, 438\u2013449 (2012)","journal-title":"Procedia Soc. Behav. Sci."},{"key":"150_CR3","doi-asserted-by":"publisher","first-page":"806","DOI":"10.1016\/j.sbspro.2013.05.043","volume":"80","author":"T Miyagi","year":"2013","unstructured":"Miyagi, T., Peque, G., Fukumoto, J.: Adaptive learning algorithms for traffic games with naive users. Procedia Soc. Behav. Sci. 80, 806\u2013817 (2013)","journal-title":"Procedia Soc. Behav. Sci."},{"issue":"4","key":"150_CR4","doi-asserted-by":"publisher","first-page":"3154","DOI":"10.1137\/120893501","volume":"5","author":"AC Chapman","year":"2013","unstructured":"Chapman, A.C., Leslie, D.S., Rogers, A., Jennings, N.R.: Convergent learning algorithms for unknown rewards games. SIAM J. Control. Optim. 5(4), 3154\u20133180 (2013)","journal-title":"SIAM J. Control. Optim."},{"key":"150_CR5","doi-asserted-by":"publisher","first-page":"71","DOI":"10.1016\/j.geb.2008.11.012","volume":"70","author":"R Cominetti","year":"2010","unstructured":"Cominetti, R., Melo, E., Sorin, S.: A payoff-based learning procedure and its application to traffic games. Games Econ. Behav. 70, 71\u201383 (2010)","journal-title":"Games Econ. Behav."},{"issue":"2","key":"150_CR6","doi-asserted-by":"publisher","first-page":"495","DOI":"10.1137\/S0363012903437976","volume":"44","author":"D Leslie","year":"2005","unstructured":"Leslie, D., Collins, E.: Individual Q-learning in normal form games. SIAM J. Control. Optim. 44(2), 495\u2013514 (2005)","journal-title":"SIAM J. Control. Optim."},{"key":"150_CR7","doi-asserted-by":"publisher","first-page":"285","DOI":"10.1016\/j.geb.2005.08.005","volume":"56","author":"D Leslie","year":"2006","unstructured":"Leslie, D., Collins, E.: Generalized weakened fictitious play. Games Econ. Behav. 56, 285\u2013298 (2006)","journal-title":"Games Econ. Behav."},{"issue":"1","key":"150_CR8","doi-asserted-by":"publisher","first-page":"373","DOI":"10.1137\/070680199","volume":"48","author":"J Marden","year":"2009","unstructured":"Marden, J., Young, P., Arslan, G., Shamma, J.S.: Payoff\u2013based dynamics for multi\u2013player weakly acyclic games. SIAM J. Control. Optim. 48(1), 373\u2013396 (2009)","journal-title":"SIAM J. Control. Optim."},{"key":"150_CR9","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1016\/j.geb.2008.02.011","volume":"65","author":"P Young","year":"2009","unstructured":"Young, P.: Learning by trial and error. Games Econ. Behav. 65, 626\u2013643 (2009)","journal-title":"Games Econ. Behav."},{"key":"150_CR10","unstructured":"Miyagi, T.: A reinforcement learning model with endogenously determined learning-efficiency parameters, The Proceedings of CIS\/SIS Conference, Keio University (2004)"},{"key":"150_CR11","unstructured":"Miyagi, T.: Stochastic fictitious play, reinforcement learning and the user equilibrium in transportation networks. A Paper Presented at the IVth Meeting on \u201cMathematics in Transport\u201d, University College London (2005)"},{"key":"150_CR12","unstructured":"Miyagi, T.: Multi-agent learning models for route choices in transportation networks: An integrated approach of regret-based strategy and reinforcement learning. Proceedings of the 11th International Conference on Travel Behavior Research, Kyoto (2006)"},{"key":"150_CR13","unstructured":"Miyagi, T., Ishiguro, M.: Modelling of route choice behaviours of car-drivers under imperfect travel information. Urban Transp. 14, 551\u2013560 (2008), WIT Press"},{"key":"150_CR14","doi-asserted-by":"publisher","first-page":"3707","DOI":"10.1103\/PhysRevE.54.3707","volume":"54","author":"C Gawron","year":"1996","unstructured":"Gawron, C.: Continuous limit of the Nagel\u2013Schreckenberg\u2013model. Phys. Rev. E. 54, 3707 (1996)","journal-title":"Phys. Rev. E"},{"key":"150_CR15","doi-asserted-by":"publisher","first-page":"2221","DOI":"10.1051\/jp1:1992277","volume":"2","author":"K Nagel","year":"1992","unstructured":"Nagel, K., Schreckenberg, M.: A cellular automaton model for freeway traffic. J. Phys. I France. 2, 2221\u20132229 (1992)","journal-title":"J. Phys. I France"},{"key":"150_CR16","doi-asserted-by":"publisher","first-page":"65","DOI":"10.1007\/BF01737559","volume":"2","author":"R Rosenthal","year":"1973","unstructured":"Rosenthal, R.: A class of games possessing pure-strategy Nash equilibria. Int. J. Game Theory. 2, 65\u201367 (1973)","journal-title":"Int. J. Game Theory"},{"key":"150_CR17","doi-asserted-by":"publisher","first-page":"124","DOI":"10.1006\/game.1996.0044","volume":"14","author":"D Monderer","year":"1996","unstructured":"Monderer, D., Shapley, L.: Potential games. Games Econ. Behav. 14, 124\u2013143 (1996)","journal-title":"Games Econ. Behav."},{"key":"150_CR18","unstructured":"Beckmann, M.J., McGuire, C.B., Winsten, C.B.: Studies in the Economics of Transportation. Yale University Press (1956)"},{"issue":"3","key":"150_CR19","doi-asserted-by":"publisher","first-page":"287","DOI":"10.1023\/A:1007678930559","volume":"38","author":"SP Singh","year":"2000","unstructured":"Singh, S.P., Jaakola, T., Littman, M.L., Szepesvari, C.: Convergence results for single-step on-policy reinforcement-learning algorithm. Mach. Learn. 38(3), 287\u2013308 (2000)","journal-title":"Mach. Learn."}],"container-title":["International Journal of Intelligent Transportation Systems Research"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s13177-017-0150-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s13177-017-0150-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s13177-017-0150-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,2,17]],"date-time":"2020-02-17T12:48:40Z","timestamp":1581943720000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s13177-017-0150-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,1,10]]},"references-count":19,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2018,9]]}},"alternative-id":["150"],"URL":"https:\/\/doi.org\/10.1007\/s13177-017-0150-6","relation":{},"ISSN":["1348-8503","1868-8659"],"issn-type":[{"type":"print","value":"1348-8503"},{"type":"electronic","value":"1868-8659"}],"subject":[],"published":{"date-parts":[[2018,1,10]]},"assertion":[{"value":"31 March 2016","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 July 2017","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 December 2017","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 January 2018","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}