{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,26]],"date-time":"2026-01-26T04:15:38Z","timestamp":1769400938492,"version":"3.49.0"},"publisher-location":"Cham","reference-count":92,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726422","type":"print"},{"value":"9783031726439","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72643-9_12","type":"book-chapter","created":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T20:48:40Z","timestamp":1732222120000},"page":"195-213","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Neural Volumetric World Models for\u00a0Autonomous Driving"],"prefix":"10.1007","author":[{"given":"Zanming","family":"Huang","sequence":"first","affiliation":[]},{"given":"Jimuyang","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Eshed","family":"Ohn-Bar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,22]]},"reference":[{"key":"12_CR1","unstructured":"Carla autonomous driving leaderboard. https:\/\/leaderboard.carla.org\/ (2022)"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Agrawal, P., Carreira, J., Malik, J.: Learning to see by moving. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.13"},{"key":"12_CR3","unstructured":"Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep ViT features as dense visual descriptors. In: ECCVW (2022)"},{"key":"12_CR4","unstructured":"Anderson, P., et\u00a0al.: On evaluation of embodied navigation agents. arXiv preprint arXiv:1807.06757 (2018)"},{"key":"12_CR5","unstructured":"Bain, M., Sammut, C.: A framework for behavioural cloning. In: Machine Intelligence (1996)"},{"key":"12_CR6","doi-asserted-by":"crossref","unstructured":"Bansal, M., Krizhevsky, A., Ogale, A.: ChauffeurNet: learning to drive by imitating the best and synthesizing the worst. In: RSS (2019)","DOI":"10.15607\/RSS.2019.XV.031"},{"key":"12_CR7","doi-asserted-by":"crossref","unstructured":"Behl, A., Chitta, K., Prakash, A., Ohn-Bar, E., Geiger, A.: Label-efficient visual abstractions for autonomous driving. In: IROS (2020)","DOI":"10.1109\/IROS45743.2020.9340641"},{"key":"12_CR8","unstructured":"Bojarski, M., et\u00a0al.: End to end learning for self-driving cars. arXiv preprint arXiv:1604.07316 (2016)"},{"key":"12_CR9","doi-asserted-by":"crossref","unstructured":"Byravan, A., Fox, D.: SE3-Nets: learning rigid body motion using deep neural networks. In: ICRA (2017)","DOI":"10.1109\/ICRA.2017.7989023"},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Caesar, H., et al.: nuscenes: a multimodal dataset for autonomous driving. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01164"},{"key":"12_CR11","unstructured":"Caine, B., et al.: Pseudo-labeling for scalable 3D object detection. arXiv preprint arXiv:2103.02093 (2021)"},{"key":"12_CR12","doi-asserted-by":"crossref","unstructured":"Cao, A.Q., de\u00a0Charette, R.: MonoScene: monocular 3D semantic scene completion. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00396"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Caron, M., Touvron, H., Misra, I., J\u00e9gou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Chang, M.F., et\u00a0al.: Argoverse: 3D tracking and forecasting with rich maps. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00895"},{"key":"12_CR15","unstructured":"Chekroun, R., Toromanoff, M., Hornauer, S., Moutarde, F.: GRI: general reinforced imitation and its application to vision-based autonomous driving. arXiv preprint arXiv:2111.08575 (2021)"},{"key":"12_CR16","doi-asserted-by":"crossref","unstructured":"Chen, D., Koltun, V., Kr\u00e4henb\u00fchl, P.: Learning to drive from a world on rails. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01530"},{"key":"12_CR17","doi-asserted-by":"crossref","unstructured":"Chen, D., Kr\u00e4henb\u00fchl, P.: Learning from all vehicles. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01671"},{"key":"12_CR18","unstructured":"Chen, D., Zhou, B., Koltun, V., Kr\u00e4henb\u00fchl, P.: Learning by cheating. In: CoRL (2020)"},{"key":"12_CR19","doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: PersFormer: 3D lane detection via perspective transformer and the OpenLane benchmark. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19839-7_32"},{"key":"12_CR20","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: ICML (2020)"},{"key":"12_CR21","unstructured":"Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020)"},{"key":"12_CR22","doi-asserted-by":"crossref","unstructured":"Cheng, R., Agia, C., Ren, Y., Li, X., Bingbing, L.: S3CNet: a sparse semantic scene completion network for lidar point clouds. In: CoRL (2021)","DOI":"10.1109\/ICRA48506.2021.9561305"},{"key":"12_CR23","doi-asserted-by":"crossref","unstructured":"Chitta, K., Prakash, A., Jaeger, B., Yu, Z., Renz, K., Geiger, A.: TransFuser: imitation with transformer-based sensor fusion for autonomous driving. PAMI (2022)","DOI":"10.1109\/TPAMI.2022.3200245"},{"key":"12_CR24","doi-asserted-by":"crossref","unstructured":"Codevilla, F., Miiller, M., L\u00f3pez, A., Koltun, V., Dosovitskiy, A.: End-to-end driving via conditional imitation learning. In: ICRA (2018)","DOI":"10.1109\/ICRA.2018.8460487"},{"key":"12_CR25","doi-asserted-by":"crossref","unstructured":"Codevilla, F., Santana, E., L\u00f3pez, A.M., Gaidon, A.: Exploring the limitations of behavior cloning for autonomous driving. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00942"},{"key":"12_CR26","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"12_CR27","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT (2019)"},{"key":"12_CR28","unstructured":"Dosovitskiy, A., Ros, G., Codevilla, F., Lopez, A., Koltun, V.: CARLA: an open urban driving simulator. In: CoRL (2017)"},{"key":"12_CR29","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1016\/j.cobeha.2017.06.005","volume":"17","author":"AD Ekstrom","year":"2017","unstructured":"Ekstrom, A.D., Isham, E.A.: Human spatial navigation: representations across dimensions and scales. Curr. Opin. Behav. Sci. 17, 84\u201389 (2017)","journal-title":"Curr. Opin. Behav. Sci."},{"key":"12_CR30","doi-asserted-by":"crossref","unstructured":"Finkelstein, A., Las, L., Ulanovsky, N.: 3D maps and compasses in the brain. Ann. Rev. Neuroscience (2016)","DOI":"10.1146\/annurev-neuro-070815-013831"},{"key":"12_CR31","unstructured":"Fu, Y., Misra, I., Wang, X.: MonoNeRF: learning generalizable nerfs from monocular videos without camera poses. In: ICML (2022)"},{"key":"12_CR32","doi-asserted-by":"crossref","unstructured":"Gkioxari, G., Ravi, N., Johnson, J.: Learning 3D object shape and layout without 3D supervision. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00174"},{"key":"12_CR33","doi-asserted-by":"crossref","unstructured":"Gupta, S., Davidson, J., Levine, S., Sukthankar, R., Malik, J.: Cognitive mapping and planning for visual navigation. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.769"},{"key":"12_CR34","unstructured":"Ha, D., Schmidhuber, J.: Recurrent world models facilitate policy evolution. In: NeurIPS (2018)"},{"key":"12_CR35","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"12_CR36","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. arXiv preprint arXiv:1911.05722 (2019)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"12_CR37","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"12_CR38","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"12_CR39","doi-asserted-by":"crossref","unstructured":"Herweg, N.A., Kahana, M.J.: Spatial representations in the human brain. Front. Hum. Neuroscience (2018)","DOI":"10.3389\/fnhum.2018.00297"},{"key":"12_CR40","doi-asserted-by":"crossref","unstructured":"Hornung, A., Wurm, K.M., Bennewitz, M., Stachniss, C., Burgard, W.: OctoMap: an efficient probabilistic 3D mapping framework based on octrees. Auton. Robots (2013)","DOI":"10.1007\/s10514-012-9321-0"},{"key":"12_CR41","doi-asserted-by":"crossref","unstructured":"Hu, P., Huang, A., Dolan, J., Held, D., Ramanan, D.: Safe local motion planning with self-supervised freespace forecasting. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01254"},{"key":"12_CR42","doi-asserted-by":"crossref","unstructured":"Hu, S., Chen, L., Wu, P., Li, H., Yan, J., Tao, D.: ST-P3: end-to-end vision-based autonomous driving via spatial-temporal feature learning. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19839-7_31"},{"key":"12_CR43","doi-asserted-by":"crossref","unstructured":"Hu, Y., et\u00a0al.: Planning-oriented autonomous driving. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01712"},{"key":"12_CR44","doi-asserted-by":"crossref","unstructured":"Jaeger, B., Chitta, K., Geiger, A.: Hidden biases of end-to-end driving models. arXiv preprint arXiv:2306.07957 (2023)","DOI":"10.1109\/ICCV51070.2023.00757"},{"key":"12_CR45","doi-asserted-by":"crossref","unstructured":"Jeffery, K.J., Jovalekic, A., Verriotis, M., Hayman, R.: Navigating in a three-dimensional world. Behav. Brain Sci. (2013)","DOI":"10.1017\/S0140525X12002476"},{"key":"12_CR46","doi-asserted-by":"crossref","unstructured":"Jiang, B., et al.: VAD: Vectorized scene representation for efficient autonomous driving. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00766"},{"key":"12_CR47","doi-asserted-by":"crossref","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: ECCV (2016)","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"12_CR48","doi-asserted-by":"crossref","unstructured":"Kendall, A., et al.: Learning to drive in a day. In: ICRA (2019)","DOI":"10.1109\/ICRA.2019.8793742"},{"key":"12_CR49","doi-asserted-by":"crossref","unstructured":"Khurana, T., Hu, P., Dave, A., Ziglar, J., Held, D., Ramanan, D.: Differentiable Raycasting for self-supervised occupancy forecasting. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19839-7_21"},{"key":"12_CR50","doi-asserted-by":"crossref","unstructured":"Lai, L., Ohn-Bar, E., Arora, S., Yi, J.S.K.: Uncertainty-guided never-ending learning to drive. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01429"},{"key":"12_CR51","doi-asserted-by":"crossref","unstructured":"Lai, L., Shangguan, Z., Zhang, J., Ohn-Bar, E.: XVO: generalized visual odometry via cross-modal self-training. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00926"},{"key":"12_CR52","doi-asserted-by":"crossref","unstructured":"Lai, Z., Liu, S., Efros, A.A., Wang, X.: Video autoencoder: self-supervised disentanglement of 3D structure and motion. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00959"},{"key":"12_CR53","unstructured":"LeCun, Y.: A path towards autonomous machine intelligence version 0.9. 2, 2022-06-27. Open Rev. (2022)"},{"key":"12_CR54","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: VoxFormer: sparse voxel transformer for camera-based 3D semantic scene completion. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00877"},{"key":"12_CR55","doi-asserted-by":"crossref","unstructured":"Li, Z., et al.: BEVFormer: learning bird\u2019s-eye-view representation from multi-camera images via spatiotemporal transformers. In: ECCV (2022)","DOI":"10.1007\/978-3-031-20077-9_1"},{"key":"12_CR56","doi-asserted-by":"crossref","unstructured":"Liao, Y., Xie, J., Geiger, A.: KITTI-360: a novel dataset and benchmarks for urban scene understanding in 2D and 3D. PAMI (2022)","DOI":"10.1109\/TPAMI.2022.3179507"},{"key":"12_CR57","doi-asserted-by":"crossref","unstructured":"Luo, C., Yang, X., Yuille, A.: Self-supervised pillar motion learning for autonomous driving. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00320"},{"key":"12_CR58","doi-asserted-by":"crossref","unstructured":"Mahajan, D., et al.: Exploring the limits of weakly supervised pretraining. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01216-8_12"},{"key":"12_CR59","unstructured":"Mao, J., Qian, Y., Zhao, H., Wang, Y.: GPT-Driver: learning to drive with GPT. arXiv preprint arXiv:2310.01415 (2023)"},{"key":"12_CR60","doi-asserted-by":"crossref","unstructured":"Menze, M., Geiger, A.: Object scene flow for autonomous vehicles. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298925"},{"key":"12_CR61","doi-asserted-by":"crossref","unstructured":"Mescheder, L., Oechsle, M., Niemeyer, M., Nowozin, S., Geiger, A.: Occupancy networks: learning 3D reconstruction in function space. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00459"},{"key":"12_CR62","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"12_CR63","unstructured":"M\u00fcller, M., Dosovitskiy, A., Ghanem, B., Koltun, V.: Driving policy transfer via modularity and abstraction. arXiv preprint arXiv:1804.09364 (2018)"},{"key":"12_CR64","doi-asserted-by":"crossref","unstructured":"Ohn-Bar, E., Prakash, A., Behl, A., Chitta, K., Geiger, A.: Learning situational driving. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01131"},{"key":"12_CR65","unstructured":"Oquab, M., et\u00a0al.: DINOv2: learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)"},{"key":"12_CR66","doi-asserted-by":"crossref","unstructured":"Pathak, D., et al.: Learning instance segmentation by interaction. In: CVPRW (2018)","DOI":"10.1109\/CVPRW.2018.00276"},{"key":"12_CR67","unstructured":"Pomerleau, D.A.: ALVINN: an autonomous land vehicle in a neural network. In: NeurIPS (1989)"},{"key":"12_CR68","unstructured":"Qi, W., Mullapudi, R.T., Gupta, S., Ramanan, D.: Learning to move with affordance maps. arXiv preprint arXiv:2001.02364 (2020)"},{"key":"12_CR69","doi-asserted-by":"crossref","unstructured":"Riegler, G., Osman\u00a0Ulusoy, A., Geiger, A.: OctNet: learning deep 3D representations at high resolutions. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.701"},{"key":"12_CR70","doi-asserted-by":"crossref","unstructured":"Spelke, E.S., Lee, S.A.: Core systems of geometry in animal minds. Philos. Trans. Royal Soc. B, Biol. Sci. (2012)","DOI":"10.1098\/rstb.2012.0210"},{"key":"12_CR71","doi-asserted-by":"crossref","unstructured":"Sun, C., Shrivastava, A., Singh, S., Gupta, A.: Revisiting unreasonable effectiveness of data in deep learning era. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.97"},{"key":"12_CR72","unstructured":"Tian, X., et al.: Occ3D: a large-scale 3D occupancy prediction benchmark for autonomous driving. In: NeurIPS (2024)"},{"issue":"4","key":"12_CR73","doi-asserted-by":"publisher","first-page":"189","DOI":"10.1037\/h0061626","volume":"55","author":"EC Tolman","year":"1948","unstructured":"Tolman, E.C.: Cognitive maps in rats and men. Psychol. Rev. 55(4), 189 (1948)","journal-title":"Psychol. Rev."},{"key":"12_CR74","doi-asserted-by":"crossref","unstructured":"Toromanoff, M., Wirbel, E., Moutarde, F.: End-to-end model-free reinforcement learning for urban driving using implicit affordances. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00718"},{"key":"12_CR75","doi-asserted-by":"crossref","unstructured":"Wang, D., Devin, C., Cai, Q.Z., Kr\u00e4henb\u00fchl, P., Darrell, T.: Monocular plan view networks for autonomous driving. In: IROS (2019)","DOI":"10.1109\/IROS40897.2019.8967897"},{"key":"12_CR76","doi-asserted-by":"crossref","unstructured":"Wei, C., Fan, H., Xie, S., Wu, C.Y., Yuille, A., Feichtenhofer, C.: Masked feature prediction for self-supervised visual pre-training. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"12_CR77","doi-asserted-by":"crossref","unstructured":"Weng, X., Ivanovic, B., Wang, Y., Wang, Y., Pavone, M.: PARA-Drive: parallelized architecture for real-time autonomous driving. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01463"},{"key":"12_CR78","unstructured":"Wu, P., Chen, L., Li, H., Jia, X., Yan, J., Qiao, Y.: Policy pre-training for end-to-end autonomous driving via self-supervised geometric modeling. In: ICLR (2023)"},{"key":"12_CR79","unstructured":"Wu, P., Jia, X., Chen, L., Yan, J., Li, H., Qiao, Y.: Trajectory-guided control prediction for end-to-end autonomous driving: A simple yet strong baseline. arXiv preprint arXiv:2206.08129 (2022)"},{"key":"12_CR80","unstructured":"Wu, S., Jakab, T., Rupprecht, C., Vedaldi, A.: DOVE: learning deformable 3D objects by watching videos. arXiv preprint arXiv:2107.10844 (2021)"},{"key":"12_CR81","unstructured":"Yalniz, I.Z., J\u00e9gou, H., Chen, K., Paluri, M., Mahajan, D.: Billion-scale semi-supervised learning for image classification. arXiv preprint arXiv:1905.00546 (2019)"},{"key":"12_CR82","doi-asserted-by":"crossref","unstructured":"Yang, C., et\u00a0al.: BEVFormer v2: adapting modern image backbones to bird\u2019s-eye-view recognition via perspective supervision. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01710"},{"key":"12_CR83","doi-asserted-by":"crossref","unstructured":"Yang, Z., Chen, L., Sun, Y., Li, H.: Visual point cloud forecasting enables scalable autonomous driving. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01390"},{"key":"12_CR84","doi-asserted-by":"crossref","unstructured":"Zeng, W., et al.: End-to-end interpretable neural motion planner. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00886"},{"key":"12_CR85","doi-asserted-by":"crossref","unstructured":"Zhang, J., Huang, Z., Ohn-Bar, E.: Coaching a teachable student. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00754"},{"key":"12_CR86","doi-asserted-by":"crossref","unstructured":"Zhang, J., Huang, Z., Ray, A., Ohn-Bar, E.: Feedback-guided autonomous driving. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01421"},{"key":"12_CR87","doi-asserted-by":"crossref","unstructured":"Zhang, J., Ohn-Bar, E.: Learning by watching. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01252"},{"key":"12_CR88","doi-asserted-by":"crossref","unstructured":"Zhang, J., Zhu, R., Ohn-Bar, E.: SelfD: self-learning large-scale driving policies from the web. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01680"},{"key":"12_CR89","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Liniger, A., Dai, D., Yu, F., Van\u00a0Gool, L.: End-to-end urban driving by imitating a reinforcement learning coach. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01494"},{"key":"12_CR90","doi-asserted-by":"crossref","unstructured":"Zhou, B., Kr\u00e4henb\u00fchl, P.: Cross-view transformers for real-time map-view semantic segmentation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01339"},{"key":"12_CR91","unstructured":"Zhou, Y., et al.: End-to-end multi-view fusion for 3D object detection in lidar point clouds. In: CoRL (2020)"},{"key":"12_CR92","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. In: ICLR (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72643-9_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T21:26:06Z","timestamp":1732224366000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72643-9_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,22]]},"ISBN":["9783031726422","9783031726439"],"references-count":92,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72643-9_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,22]]},"assertion":[{"value":"22 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}