{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T13:28:00Z","timestamp":1743341280004,"version":"3.28.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,9]]},"DOI":"10.1109\/cavs.2019.8887764","type":"proceedings-article","created":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:12:36Z","timestamp":1572567156000},"page":"1-5","source":"Crossref","is-referenced-by-count":6,"title":["Meta-Deep Q-Learning for Eco-Routing"],"prefix":"10.1109","author":[{"given":"Xin","family":"Ma","sequence":"first","affiliation":[]},{"given":"Yuanchang","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Chunxiao","family":"Chigan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Analyzing Knowledge Transfer in Deep Q-Networks for Autonomously Handling Multiple Intersections","author":"isele","year":"2017","journal-title":"arXiv preprint arXiv 1705 01197"},{"key":"ref11","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","author":"finn","year":"0","journal-title":"Presented in International conference on machine learning (ICML)"},{"key":"ref12","article-title":"Meta-sgd: Learning to learn quickly for few shot learning","author":"li","year":"2017","journal-title":"arXiv preprint arXiv 1707 08040"},{"key":"ref13","article-title":"Multi-objective deep reinforcement learning","author":"mossalam","year":"2016","journal-title":"arXiv preprint arXiv 1610 01292"},{"key":"ref14","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref15","first-page":"9839","article-title":"Reinforcement learning for solving the vehicle routing problem","author":"nazari","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref16","first-page":"5690","article-title":"Imagination-augmented agents for deep reinforcement learning","author":"racani\u00e8re","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"85","DOI":"10.15439\/2018F231","article-title":"Modular Multi-Objective Deep Reinforcement Learning with Decision Values","author":"tajmajer","year":"2018","journal-title":"2018 Federated Conference on Computer Science and Information Systems (FedCSIS)"},{"key":"ref18","article-title":"RL2: Fast Reinforcement Learning via Slow Reinforcement Learning","author":"duan","year":"2016","journal-title":"arXiv preprint arXiv 1611 02779"},{"key":"ref19","article-title":"Learning to reinforcement learn","author":"wang","year":"2016","journal-title":"arXiv preprint arXiv 1611 05763"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2007.4357672"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.tranpol.2012.05.010"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.trb.2013.06.004"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2012.2204051"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2015.06.002"},{"key":"ref7","article-title":"Eco-routing based on a data driven fuel consumption model","author":"huang","year":"2018","journal-title":"arXiv preprint arXiv 1801 08639"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.trd.2005.01.001"},{"journal-title":"Energy Use for Transportation - Energy Explained Your Guide To Understanding Energy - Energy Information Administration","year":"2019","key":"ref1"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","author":"sutton","key":"ref9"},{"key":"ref20","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"journal-title":"DLR - Institute of Transportation Systems - SUMO &#x2013; Simulation of Urban MObility","year":"2018","author":"pattberg","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-16111-7_23"},{"journal-title":"Welcome to HBEFA","year":"2017","key":"ref24"},{"journal-title":"OpenStreetMap","year":"2019","key":"ref23"}],"event":{"name":"2019 IEEE 2nd Connected and Automated Vehicles Symposium (CAVS)","start":{"date-parts":[[2019,9,22]]},"location":"Honolulu, HI, USA","end":{"date-parts":[[2019,9,23]]}},"container-title":["2019 IEEE 2nd Connected and Automated Vehicles Symposium (CAVS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8884213\/8887758\/08887764.pdf?arnumber=8887764","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,19]],"date-time":"2022-07-19T20:21:01Z","timestamp":1658262061000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8887764\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,9]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/cavs.2019.8887764","relation":{},"subject":[],"published":{"date-parts":[[2019,9]]}}}