{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T23:34:48Z","timestamp":1774481688189,"version":"3.50.1"},"reference-count":39,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2025,11]]},"DOI":"10.1109\/tits.2025.3589857","type":"journal-article","created":{"date-parts":[[2025,7,28]],"date-time":"2025-07-28T19:51:20Z","timestamp":1753732280000},"page":"20748-20765","source":"Crossref","is-referenced-by-count":3,"title":["Dynamic Arrival Prioritization With Target Time Management and Deep Reinforcement Learning"],"prefix":"10.1109","volume":"26","author":[{"given":"Leonardo","family":"Caranti","sequence":"first","affiliation":[{"name":"Control and Operations Department, Aerospace Faculty, Delft University of Technology, Delft, Netherlands"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9326-5902","authenticated-orcid":false,"given":"Marta","family":"Ribeiro","sequence":"additional","affiliation":[{"name":"Control and Operations Department, Aerospace Faculty, Delft University of Technology, Delft, Netherlands"}]},{"given":"Marie","family":"Carr\u00e9","sequence":"additional","affiliation":[{"name":"Operations Research and Air Traffic Management department, Swiss International Airlines Ltd., (SWISS), Kloten, Switzerland"}]}],"member":"263","reference":[{"key":"ref1","article-title":"A dynamic programming approach to the aircraft sequencing problem","author":"Psaraftis","year":"1978"},{"key":"ref2","article-title":"Airline based priority flight sequencing: Of aircraft arriving at an airport","author":"Vervaat","year":"2020"},{"key":"ref3","article-title":"Dynamic airline centric inbound priority sequencing: A case study on Westerly morning arrivals for KLM at Schiphol","author":"Hoogendoorn","year":"2022"},{"key":"ref4","volume-title":"ATFCM Users Manual","year":"2023"},{"key":"ref5","volume-title":"Network Operations Report 2019-Main Report","year":"2020"},{"key":"ref6","article-title":"Optimization of regulated airline arrival flows via target time management","volume-title":"Proc. SESAR Innov. 
Days (SID)","author":"Caranti"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.15632\/jtam-pl\/194991"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-12296-5_6"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.3390\/aerospace11080604"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.3390\/aerospace10010062"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2017.05.011"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.tre.2023.103295"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2023.104188"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1017\/aer.2022.17"},{"key":"ref15","first-page":"1","article-title":"Coupling mathematical optimization and machine learning for the aircraft landing problem","volume-title":"Proc. 9th Int. Conf. Res. Air Transp.","author":"Ikli"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.tre.2017.08.006"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s10951-014-0415-8"},{"key":"ref18","volume-title":"Innovative Slot Allocation: An Overview","year":"2014"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.jairtraman.2021.102124"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-17834-4_10"},{"key":"ref21","article-title":"Flight prioritization and turnaround recovery","volume-title":"Proc. 14th ATM Res. Develop. Seminar","author":"Evler"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1177\/03611981231182714"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3390\/aerospace12020119"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CAI59869.2024.00213"},{"key":"ref25","volume-title":"Arrival Planning Information (API) Implementation Guide","year":"2022"},{"key":"ref26","article-title":"The aircraft sequencing problem with arrivals and departures","author":"Muharremoglu","year":"2000"},{"key":"ref27","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref28","article-title":"A survey on reinforcement learning in aviation applications","author":"Razzaghi","year":"2022","journal-title":"arXiv:2211.02147"},{"key":"ref29","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref30","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2005.12729"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.3390\/drones7040245"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/9.580874"},{"key":"ref34","first-page":"560","article-title":"Error bounds for approximate policy iteration","volume-title":"Proc. 19th Int. Conf. Mach. Learn.","volume":"3","author":"Munos"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013647"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.05905"},{"key":"ref37","first-page":"19440","article-title":"Parallel Q-learning: Scaling off-policy reinforcement learning under massively parallel simulation","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Li"},{"key":"ref38","article-title":"Boosting soft actor-critic: Emphasizing recent experience without forgetting the past","author":"Wang","year":"2019","journal-title":"arXiv:1906.04009"},{"key":"ref39","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2017","journal-title":"arXiv:1412.6980"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6979\/11241045\/11098665.pdf?arnumber=11098665","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T18:44:00Z","timestamp":1762973040000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11098665\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11]]},"references-count":39,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tits.2025.3589857","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11]]}}}