{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T14:27:58Z","timestamp":1762957678540,"version":"3.43.0"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031917660"},{"type":"electronic","value":"9783031917677"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91767-7_1","type":"book-chapter","created":{"date-parts":[[2025,5,26]],"date-time":"2025-05-26T13:44:12Z","timestamp":1748267052000},"page":"1-14","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Valeo4Cast: A Modular Approach to\u00a0End-to-End Forecasting"],"prefix":"10.1007","author":[{"given":"Yihong","family":"Xu","sequence":"first","affiliation":[]},{"given":"\u00c9loi","family":"Zablocki","sequence":"additional","affiliation":[]},{"given":"Alexandre","family":"Boulch","sequence":"additional","affiliation":[]},{"given":"Gilles","family":"Puy","sequence":"additional","affiliation":[]},{"given":"Mickael","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Florent","family":"Bartoccioni","sequence":"additional","affiliation":[]},{"given":"Nermin","family":"Samet","sequence":"additional","affiliation":[]},{"given":"Oriane","family":"Sim\u00e9oni","sequence":"additional","affiliation":[]},{"given":"Spyros","family":"Gidaris","sequence":"additional","affiliation":[]},{"given":"Tuan-Hung","family":"Vu","sequence":"additional","affiliation":[]},{"given":"Andrei","family":"Bursuc","sequence":"additional","affiliation":[]},{"given":"Eduardo","family":"Valle","sequence":"additional","affiliation":[]},{"given":"Renaud","family":"Marlet","sequence":"additional","affiliation":[]},{"given":"Matthieu","family":"Cord","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"1_CR1","doi-asserted-by":"crossref","unstructured":"Ben-Younes, H., Zablocki, \u00c9., Chen, M., P\u00e9rez, P., Cord, M.: Raising context awareness in motion forecasting. In: CVPRW (2022)","DOI":"10.1109\/CVPRW56347.2022.00487"},{"key":"1_CR2","doi-asserted-by":"crossref","unstructured":"Bernardin, K., Stiefelhagen, R.: Evaluating multiple object tracking performance: the CLEAR MOT metrics. EURASIP J. Image Video Process. (2008)","DOI":"10.1155\/2008\/246309"},{"key":"1_CR3","doi-asserted-by":"crossref","unstructured":"Caesar, H., et al.: nuscenes: a multimodal dataset for autonomous driving. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01164"},{"key":"1_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Y., et al.: Focalformer3D: focusing on hard instance for 3D object detection. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00771"},{"key":"1_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Y., Liu, J., Zhang, X., Qi, X., Jia, J.: Voxelnext: fully sparse voxelnet for 3D object detection and tracking. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02076"},{"key":"1_CR6","doi-asserted-by":"crossref","unstructured":"Ettinger, S., et al.: Large scale interactive motion forecasting for autonomous driving: the Waymo open motion dataset. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00957"},{"key":"1_CR7","doi-asserted-by":"crossref","unstructured":"Feng, L., Bahari, M., Amor, K.M.B., Zablocki, \u00c9., Cord, M., Alahi, A.: Unitraj: a unified framework for scalable vehicle trajectory prediction. In: ECCV (2024)","DOI":"10.1007\/978-3-031-73254-6_7"},{"key":"1_CR8","unstructured":"Girgis, R., et al.: Latent variable sequential set transformers for joint multi-agent motion prediction. In: ICLR (2022)"},{"key":"1_CR9","doi-asserted-by":"crossref","unstructured":"Gu, J., et al.: Vip3D: end-to-end visual trajectory prediction via 3d agent queries. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00532"},{"key":"1_CR10","unstructured":"Hu, H., et al.: EA-LSS: edge-aware lift-splat-shot framework for 3D BEV object detection. arXiv preprint arXiv:2303.17895 (2023)"},{"key":"1_CR11","doi-asserted-by":"crossref","unstructured":"Hu, Y., et al.: Planning-oriented autonomous driving. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01712"},{"key":"1_CR12","doi-asserted-by":"crossref","unstructured":"Kim, B., et al.: Lapred: lane-aware prediction of multi-modal future trajectories of dynamic agents. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01440"},{"key":"1_CR13","doi-asserted-by":"crossref","unstructured":"Lee, J., Lee, J., Lee, J., Kwon, S., Jung, H.: Re-VoxelDet: rethinking neck and head architectures for high-performance voxel-based 3D detection. In: WACV (2024)","DOI":"10.1109\/WACV57701.2024.00733"},{"key":"1_CR14","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context. In: ECCV (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1_CR15","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Bevfusion: multi-task multi-sensor fusion with unified bird\u2019s-eye view representation. In: ICRA (2023)","DOI":"10.1109\/ICRA48891.2023.10160968"},{"key":"1_CR16","doi-asserted-by":"crossref","unstructured":"Luiten, J., et al.: HOTA: a higher order metric for evaluating multi-object tracking. IJCV (2021)","DOI":"10.1007\/s11263-020-01375-2"},{"key":"1_CR17","doi-asserted-by":"crossref","unstructured":"Nayakanti, N., Al-Rfou, R., Zhou, A., Goel, K., Refaat, K.S., Sapp, B.: Wayformer: motion forecasting via simple & efficient attention networks. In: ICRA (2023)","DOI":"10.1109\/ICRA48891.2023.10160609"},{"key":"1_CR18","doi-asserted-by":"crossref","unstructured":"Pan, X., Xia, Z., Song, S., Li, L.E., Huang, G.: 3D object detection with pointformer. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00738"},{"key":"1_CR19","unstructured":"Peri, N., Dave, A., Ramanan, D., Kong, S.: Towards long-tailed 3D detection. In: CoRL (2022)"},{"key":"1_CR20","doi-asserted-by":"crossref","unstructured":"Peri, N., Jonathon\u00a0Luiten, M.L., O\u0161ep, A., Leal-Taix\u00e9, L., Ramanan, D.: Forecasting from lidar via future object detection. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01669"},{"key":"1_CR21","doi-asserted-by":"crossref","unstructured":"Puy, G., et al.: Three pillars improving vision foundation model distillation for lidar. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.02033"},{"key":"1_CR22","doi-asserted-by":"crossref","unstructured":"Salzmann, T., Ivanovic, B., Chakravarty, P., Pavone, M.: Trajectron++: dynamically-feasible trajectory forecasting with heterogeneous data. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58523-5_40"},{"key":"1_CR23","unstructured":"Shi, S., Jiang, L., Dai, D., Schiele, B.: Motion transformer with global intention localization and local movement refinement. In: NeurIPS (2022)"},{"key":"1_CR24","unstructured":"Wang, Z., et al.: Technical report for argoverse challenges on unified sensor-based detection, tracking, and forecasting. CoRR abs\/2311.15615 (2023)"},{"key":"1_CR25","doi-asserted-by":"crossref","unstructured":"Weng, X., Wang, J., Held, D., Kitani, K.: 3D multi-object tracking: a baseline and new evaluation metrics. In: IEEE International Conference on Intelligent Robots and Systems (2020)","DOI":"10.1109\/IROS45743.2020.9341164"},{"key":"1_CR26","unstructured":"Wilson, B., et al.: Argoverse 2: next generation datasets for self-driving perception and forecasting. In: NeurIPS Track on Datasets and Benchmarks (2021)"},{"key":"1_CR27","unstructured":"Woo, J., Kim, J., Im, S.: Motion forecasting via coordinate transformations and object trajectory modifications. Technical report (2023)"},{"key":"1_CR28","doi-asserted-by":"crossref","unstructured":"Xu, Y., Ban, Y., Delorme, G., Gan, C., Rus, D., Alameda-Pineda, X.: Transcenter: transformers with dense representations for multiple-object tracking. IEEE Trans. Pattern Anal. Mach. Intell. (2023)","DOI":"10.1109\/TPAMI.2022.3225078"},{"key":"1_CR29","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: Towards motion forecasting with real-world perception inputs: are end-to-end approaches competitive? In: ICRA (2024)","DOI":"10.1109\/ICRA57147.2024.10610201"},{"key":"1_CR30","doi-asserted-by":"crossref","unstructured":"Yin, T., Zhou, X., Kr\u00e4henb\u00fchl, P.: Center-based 3D object detection and tracking. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01161"},{"key":"1_CR31","doi-asserted-by":"crossref","unstructured":"Yuan, Y., Weng, X., Ou, Y., Kitani, K.: Agentformer: agent-aware transformers for socio-temporal multi-agent forecasting. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00967"},{"key":"1_CR32","doi-asserted-by":"crossref","unstructured":"Zhang, T., Chen, X., Wang, Y., Wang, Y., Zhao, H.: Mutr3D: a multi-camera tracking framework via 3D-to-2D queries. In: CVPRW (2022)","DOI":"10.1109\/CVPRW56347.2022.00500"},{"key":"1_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: Bytetrack: multi-object tracking by associating every detection box. In: ECCV (2022)","DOI":"10.1007\/978-3-031-20047-2_1"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91767-7_1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T08:30:48Z","timestamp":1754555448000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91767-7_1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031917660","9783031917677"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91767-7_1","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}