{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T15:57:50Z","timestamp":1774540670977,"version":"3.50.1"},"reference-count":36,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s11760-024-03595-2","type":"journal-article","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T18:51:40Z","timestamp":1733251900000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["LRCFormer: lightweight transformer based radar-camera fusion for 3D target detection"],"prefix":"10.1007","volume":"19","author":[{"given":"Xiaohong","family":"Huang","sequence":"first","affiliation":[]},{"given":"Kunqiang","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Ziran","family":"Tian","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,3]]},"reference":[{"key":"3595_CR1","unstructured":"Barbosa, F.M., Os\u00f3rio, F.S.: Camera-Radar Perception for Autonomous Vehicles and ADAS: Concepts, Datasets and Metrics. Preprint at https:\/\/arxiv.org\/abs\/2303.04302 (2023)"},{"issue":"11","key":"3595_CR2","doi-asserted-by":"publisher","first-page":"4208","DOI":"10.3390\/s22114208","volume":"22","author":"Y Zhou","year":"2022","unstructured":"Zhou, Y., Liu, L., et al.: Towards deep radar perception for autonomous driving: datasets, methods, and challenges. Sensors 22(11), 4208 (2022)","journal-title":"Sensors"},{"issue":"7","key":"3595_CR3","doi-asserted-by":"publisher","first-page":"6640","DOI":"10.1109\/TITS.2021.3059674","volume":"23","author":"Z Liu","year":"2021","unstructured":"Liu, Z., Cai, Y., et al.: Robust target recognition and tracking of self-driving cars with radar and camera information fusion under severe weather conditions. IEEE Transact. Intell. Transp. Syst. 23(7), 6640\u20136653 (2021)","journal-title":"IEEE Transact. Intell. Transp. Syst."},{"key":"3595_CR4","doi-asserted-by":"crossref","unstructured":"Yao, S., Guan, R., et al.: Radar-camera fusion for object detection and semantic segmentation in autonomous driving: a comprehensive review. IEEE Transact. Intell. Veh. (2023)","DOI":"10.1109\/TIV.2023.3307157"},{"key":"3595_CR5","first-page":"10421","volume":"35","author":"T Liang","year":"2022","unstructured":"Liang, T., Xie, H., et al.: Bevfusion: a simple and robust lidar-camera fusion framework. Adv. Neural Info. Process. Syst. 35, 10421\u201310434 (2022)","journal-title":"Adv. Neural Info. Process. Syst."},{"key":"3595_CR6","doi-asserted-by":"crossref","unstructured":"Liu, Z., Tang, H., et al.: Bevfusion: Multi-task multi-sensor fusion with unified bird\u2019s-eye view representation. In: Paper Presented at the 2023 IEEE International Conference on Robotics and Automation (ICRA) (2023)","DOI":"10.1109\/ICRA48891.2023.10160968"},{"key":"3595_CR7","doi-asserted-by":"crossref","unstructured":"St\u00e4cker, L., Mishra, S., et al.: RC-BEVFusion: A plug-in module for radar-camera bird\u2019s eye view feature fusion. In: Paper Presented at the DAGM German Conference on Pattern Recognition (2023)","DOI":"10.1007\/978-3-031-54605-1_12"},{"key":"3595_CR8","doi-asserted-by":"crossref","unstructured":"Li, Z., Wang, W., et al.: Bevformer: Learning bird\u2019s-eye-view representation from multi-camera images via spatiotemporal transformers. In: Paper Presented at the European Conference on Computer Vision (2022)","DOI":"10.1007\/978-3-031-20077-9_1"},{"key":"3595_CR9","doi-asserted-by":"crossref","unstructured":"Cui, C., et al.: Radar Enlightens the Dark: Enhancing Low-Visibility Perception for Automated Vehicles with Camera-Radar Fusion. In: Paper Presented at the 2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC) (2023)","DOI":"10.1109\/ITSC57777.2023.10422147"},{"key":"3595_CR10","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., et al.: Attention is all you need. Adv. Neural Inform. Process. Syst. 30 (2017)"},{"key":"3595_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2023.108698","volume":"108","author":"Q Zhou","year":"2023","unstructured":"Zhou, Q., Sun, Z., et al.: Mixture lightweight transformer for scene understanding. Comput. Electr. Eng. 108, 108698 (2023)","journal-title":"Comput. Electr. Eng."},{"key":"3595_CR12","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Tuzel, O.: Voxelnet: End-to-end learning for point cloud based 3d object detection. In: Paper Presented at the IEEE Conference on Computer Vision and Pattern Recognition (2018)","DOI":"10.1109\/CVPR.2018.00472"},{"key":"3595_CR13","unstructured":"Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Paper Presented at the IEEE Conference on Computer Vision and Pattern Recognition (2017)"},{"key":"3595_CR14","unstructured":"Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Adv. Neural Info. Process. Syst. 30 (2017)"},{"key":"3595_CR15","doi-asserted-by":"crossref","unstructured":"Lang, A.H., Vora, S., et al.: Pointpillars: Fast encoders for object detection from point clouds. In: Paper Presented at the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.01298"},{"key":"3595_CR16","doi-asserted-by":"crossref","unstructured":"Yin, T., Zhou, X., Krahenbuhl, P.: Center-based 3d object detection and tracking. In: Paper Presented at the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2021)","DOI":"10.1109\/CVPR46437.2021.01161"},{"key":"3595_CR17","unstructured":"Wang, Y., Guizilini, V.C., et al.: Detr3d: 3d object detection from multi-view images via 3d-to-2d queries. In: Paper Presented at the Conference on Robot Learning (2022)"},{"key":"3595_CR18","doi-asserted-by":"crossref","unstructured":"Carion, N., Massa, F., et al.: End-to-end object detection with transformers. In: Paper Presented at the European conference on computer vision (2020)","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"3595_CR19","doi-asserted-by":"crossref","unstructured":"Liu, Y., Wang, T., Zhang, X., Sun, J.: Petr: Position embedding transformation for multi-view 3d object detection. In: Paper Presented at the European conference on computer vision (2022)","DOI":"10.1007\/978-3-031-19812-0_31"},{"key":"3595_CR20","doi-asserted-by":"crossref","unstructured":"Li, Y., Ge, Z., et al.: Bevdepth: Acquisition of reliable depth for multi-view 3d object detection. In: Paper Presented at the AAAI Conference on Artificial Intelligence (2023)","DOI":"10.1609\/aaai.v37i2.25233"},{"key":"3595_CR21","doi-asserted-by":"crossref","unstructured":"Nabati, R., Qi, H.: Rrpn: Radar region proposal network for object detection in autonomous vehicles. In: Paper Presented at the 2019 IEEE International Conference on Image Processing (ICIP) (2019)","DOI":"10.1109\/ICIP.2019.8803392"},{"key":"3595_CR22","unstructured":"Bansal, K., Rungta, K., Bharadia, D.: Radsegnet: A reliable approach to radar camera fusion. Preprint at https:\/\/arxiv.org\/abs\/2208.03849 (2022)"},{"key":"3595_CR23","doi-asserted-by":"crossref","unstructured":"John, V., Mita, S.: RVNet: Deep sensor fusion of monocular camera and radar for image-based obstacle detection in challenging environments. In: Paper Presented at the Image and Video Technology: 9th Pacific-Rim Symposium, PSIVT 2019, Sydney, NSW, Australia, November 18\u201322, 2019, Proceedings 9 (2019)","DOI":"10.1007\/978-3-030-34879-3_27"},{"key":"3595_CR24","doi-asserted-by":"crossref","unstructured":"Nabati, R., Qi, H.: Centerfusion: Center-based radar and camera fusion for 3d object detection. In: Paper Presented at the Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (2021)","DOI":"10.1109\/WACV48630.2021.00157"},{"key":"3595_CR25","doi-asserted-by":"crossref","unstructured":"Duan, K., Bai, S., Xie, L., et al.: Centernet: Keypoint triplets for object detection. In: Paper Presented at the IEEE\/CVF International Conference on Computer Vision (2019)","DOI":"10.1109\/ICCV.2019.00667"},{"key":"3595_CR26","doi-asserted-by":"crossref","unstructured":"Kowol, K., Rottmann, M., Bracke, S., Gottschalk, H.: Yodar: Uncertainty-based sensor fusion for vehicle detection with camera and radar sensors. Preprint at https:\/\/arxiv.org\/abs\/2010.03320 (2020)","DOI":"10.5220\/0010239301770186"},{"key":"3595_CR27","doi-asserted-by":"crossref","unstructured":"Nobis, F., Geisslinger, M., et al.: A deep learning-based radar and camera sensor fusion architecture for object detection. In: Paper Presented at the 2019 Sensor Data Fusion: Trends, Solutions, Applications (SDF) (2019)","DOI":"10.1109\/SDF.2019.8916629"},{"issue":"4","key":"3595_CR28","doi-asserted-by":"publisher","first-page":"956","DOI":"10.3390\/s20040956","volume":"20","author":"S Chang","year":"2020","unstructured":"Chang, S., Zhang, Y., et al.: Spatial attention fusion for obstacle detection using mmwave radar and vision sensor. Sensors 20(4), 956 (2020)","journal-title":"Sensors"},{"key":"3595_CR29","doi-asserted-by":"crossref","unstructured":"Long, Y., Kumar, A., et al.: RADIANT: Radar-image association network for 3D object detection. In: Paper presented at the AAAI Conference on Artificial Intelligence (2023)","DOI":"10.1609\/aaai.v37i2.25270"},{"issue":"2","key":"3595_CR30","doi-asserted-by":"publisher","first-page":"1523","DOI":"10.1109\/TIV.2023.3240287","volume":"8","author":"T Zhou","year":"2023","unstructured":"Zhou, T., Chen, J., et al.: Bridging the view disparity between radar and camera features for multi-modal fusion 3d object detection. IEEE Transact. Intell. Veh. 8(2), 1523\u20131535 (2023)","journal-title":"IEEE Transact. Intell. Veh."},{"key":"3595_CR31","doi-asserted-by":"crossref","unstructured":"Kim, Y., Kim, S., Choi, J.W., Kum, D.: Craft: Camera-radar 3d object detection with spatio-contextual fusion transformer. In: Paper Presented at the AAAI Conference on Artificial Intelligence (2023)","DOI":"10.1609\/aaai.v37i1.25198"},{"key":"3595_CR32","doi-asserted-by":"crossref","unstructured":"Kim, Y., Shin, J., et al.: Crn: Camera radar net for accurate, robust, efficient 3d perception. In: Paper Presented at the IEEE\/CVF International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCV51070.2023.01615"},{"key":"3595_CR33","doi-asserted-by":"crossref","unstructured":"Chen, X., Zhang, T., et al.: Futr3d: A unified sensor fusion framework for 3d detection. In: Paper Presented at the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPRW59228.2023.00022"},{"key":"3595_CR34","doi-asserted-by":"crossref","unstructured":"Shuai, X., Shen, Y., et al.: millieye: A lightweight mmwave radar and camera fusion system for robust object detection. In: Paper Presented at the International Conference on Internet-of-Things Design and Implementation (2021)","DOI":"10.1145\/3450268.3453532"},{"key":"3595_CR35","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Paper Presented at the IEEE Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3595_CR36","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Doll\u00e1r, P., et al.: Feature pyramid networks for object detection. In: Paper Presented at the IEEE Conference on Computer Vision and Pattern Recognition (2017)","DOI":"10.1109\/CVPR.2017.106"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03595-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-024-03595-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03595-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T17:51:53Z","timestamp":1738086713000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-024-03595-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,3]]},"references-count":36,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["3595"],"URL":"https:\/\/doi.org\/10.1007\/s11760-024-03595-2","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,3]]},"assertion":[{"value":"28 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 September 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 November 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 December 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"51"}}