{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T14:52:18Z","timestamp":1770821538448,"version":"3.50.1"},"reference-count":65,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2023YFB2504400"],"award-info":[{"award-number":["2023YFB2504400"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52225212"],"award-info":[{"award-number":["52225212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U20A20333"],"award-info":[{"award-number":["U20A20333"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52072160"],"award-info":[{"award-number":["52072160"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1109\/tits.2025.3539658","type":"journal-article","created":{"date-parts":[[2025,2,17]],"date-time":"2025-02-17T13:40:02Z","timestamp":1739799602000},"page":"8834-8847","source":"Crossref","is-referenced-by-count":2,"title":["RTMDet-R: A Robust Instance Segmentation Network for Complex Traffic Scenarios"],"prefix":"10.1109","volume":"26","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9136-8091","authenticated-orcid":false,"given":"Hai","family":"Wang","sequence":"first","affiliation":[{"name":"School of Automotive and Traffic Engineering, Jiangsu University, Zhenjiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-6947-446X","authenticated-orcid":false,"given":"Qirui","family":"Qin","sequence":"additional","affiliation":[{"name":"School of Automotive and Traffic Engineering, Jiangsu University, Zhenjiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2079-3867","authenticated-orcid":false,"given":"Long","family":"Chen","sequence":"additional","affiliation":[{"name":"Automotive Engineering Research Institute, Jiangsu University, Zhenjiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1492-3116","authenticated-orcid":false,"given":"Yicheng","family":"Li","sequence":"additional","affiliation":[{"name":"Automotive Engineering Research Institute, Jiangsu University, Zhenjiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0633-9887","authenticated-orcid":false,"given":"Yingfeng","family":"Cai","sequence":"additional","affiliation":[{"name":"Automotive Engineering Research Institute, Jiangsu University, Zhenjiang, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref2","first-page":"17721","article-title":"SOLOv2: Dynamic and fast instance segmentation","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref3","article-title":"Advancing vision transformers with group-mix attention","author":"Ge","year":"2023","journal-title":"arXiv:2311.15157"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00925"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-023-0364-2"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01392"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3320088"},{"key":"ref8","first-page":"1140","article-title":"SegNeXt: Rethinking convolutional attention design for semantic segmentation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Guo"},{"key":"ref9","article-title":"MV2DFusion: Leveraging modality-specific object semantics for multi-modal 3D detection","author":"Wang","year":"2024","journal-title":"arXiv:2408.05945"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2211.11943"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00722"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02308"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_17"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2024.3363830"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00439"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02266"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref20","article-title":"RTMDet: An empirical study of designing real-time object detectors","author":"Lyu","year":"2022","journal-title":"arXiv:2212.07784"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00271"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913491297"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2019.00190"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref27","article-title":"YOLOv3: An incremental improvement","author":"Redmon","year":"2018","journal-title":"arXiv:1804.02767"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01548"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00913"},{"key":"ref32","article-title":"Rethinking atrous convolution for semantic image segmentation","author":"Chen","year":"2017","journal-title":"arXiv:1706.05587"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3111116"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00539"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01683"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00525"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00203"},{"key":"ref39","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01388"},{"key":"ref42","first-page":"24261","article-title":"MLP-mixer: An all-MLP architecture for vision","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst.","volume":"34","author":"Tolstikhin"},{"key":"ref43","article-title":"TPSeNCE: Towards artifact-free realistic rain generation for deraining and object detection in rain","author":"Zheng","year":"2023","journal-title":"arXiv:2311.00660"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00303"},{"key":"ref45","article-title":"Designing network design strategies through gradient path analysis","author":"Wang","year":"2022","journal-title":"arXiv:2211.04800"},{"key":"ref46","article-title":"How do vision transformers work?","author":"Park","year":"2022","journal-title":"arXiv:2202.06709"},{"key":"ref47","first-page":"12116","article-title":"Do vision transformers see like convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Raghu"},{"key":"ref48","first-page":"25346","article-title":"Dual-stream network for visual recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Mao"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref51","article-title":"UniRepLKNet: A universal perception large-kernel ConvNet for audio, video, point cloud, time-series and image recognition","author":"Ding","year":"2023","journal-title":"arXiv:2311.15599"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00720"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00090"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.169"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.189"},{"key":"ref56","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017","journal-title":"arXiv:1704.04861"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01079"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2209.02976"},{"key":"ref59","first-page":"1","article-title":"Gold-YOLO: Efficient object detector via gather-and-distribute mechanism","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Wang"},{"key":"ref60","first-page":"21002","article-title":"Generalized focal loss: Learning qualified and distributed bounding boxes for dense object detection","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Li"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00075"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/3DV.2016.79"},{"key":"ref63","article-title":"BDD100K: A diverse driving video database with scalable annotation tooling","author":"Yu","year":"2018","journal-title":"arXiv:1805.04687"},{"key":"ref64","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref65","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2016","journal-title":"arXiv:1608.03983"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6979\/11021249\/10891634.pdf?arnumber=10891634","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T20:51:48Z","timestamp":1770411108000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10891634\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6]]},"references-count":65,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tits.2025.3539658","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"value":"1524-9050","type":"print"},{"value":"1558-0016","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,6]]}}}