{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,11]],"date-time":"2026-04-11T19:46:20Z","timestamp":1775936780892,"version":"3.50.1"},"reference-count":99,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276108"],"award-info":[{"award-number":["62276108"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1109\/tcsvt.2024.3524670","type":"journal-article","created":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T19:44:01Z","timestamp":1735760641000},"page":"4870-4882","source":"Crossref","is-referenced-by-count":46,"title":["SparseTrack: Multi-Object Tracking by Performing Scene Decomposition Based on Pseudo-Depth"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8589-2578","authenticated-orcid":false,"given":"Zelin","family":"Liu","sequence":"first","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6732-7823","authenticated-orcid":false,"given":"Xinggang","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"given":"Cheng","family":"Wang","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Huazhong University of Science and Technology, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4582-7488","authenticated-orcid":false,"given":"Wenyu","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3449-5940","authenticated-orcid":false,"given":"Xiang","family":"Bai","sequence":"additional","affiliation":[{"name":"School of Software Engineering, Huazhong University of Science and Technology, Wuhan, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3054719"},{"key":"ref2","article-title":"MOTChallenge 2015: Towards a benchmark for multi-target tracking","author":"Leal-Taix\u00e9","year":"2015","journal-title":"arXiv:1504.01942"},{"key":"ref3","article-title":"MOT16: A benchmark for multi-object tracking","author":"Milan","year":"2016","journal-title":"arXiv:1603.00831"},{"key":"ref4","article-title":"MOT20: A benchmark for multi object tracking in crowded scenes","author":"Dendorfer","year":"2020","journal-title":"arXiv:2003.09003"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2016.7533003"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20047-2_1"},{"key":"ref7","article-title":"TransCenter: Transformers with dense representations for multiple-object tracking","author":"Xu","year":"2021","journal-title":"arXiv:2103.15145"},{"key":"ref8","article-title":"TransTrack: Multiple object tracking with transformer","author":"Sun","year":"2020","journal-title":"arXiv:2012.15460"},{"key":"ref9","article-title":"TransMOT: Spatial-temporal graph transformer for multiple object tracking","author":"Chu","year":"2021","journal-title":"arXiv:2104.00194"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20047-2_5"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19812-0_38"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02112"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.81"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-020-01375-2"},{"key":"ref15","article-title":"DanceTrack: Multi-object tracking in uniform appearance and diverse motion","author":"Sun","year":"2021","journal-title":"arXiv:2111.14690"},{"key":"ref16","first-page":"1","article-title":"High-speed tracking-by-detection without using image information","volume-title":"Proc. 14th IEEE Int. Conf. Adv. Video Signal Based Surveill. (AVSS)","author":"Bochinski"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1115\/1.3662552"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2017.8296962"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2018.8486597"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3613460"},{"key":"ref21","article-title":"Torchreid: A library for deep learning person re-identification in PyTorch","author":"Zhou","year":"2019","journal-title":"arXiv:1910.10093"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00380"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3069237"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3296680"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3276996"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3268080"},{"key":"ref27","article-title":"Do different tracking tasks require different appearance models?","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02095"},{"key":"ref30","article-title":"Semi-TCL: Semi-supervised track contrastive representation learning","author":"Li","year":"2021","journal-title":"arXiv:2107.02396"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.01.008"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3155660"},{"key":"ref33","article-title":"Momentum contrast for unsupervised visual representation learning","author":"He","year":"2019","journal-title":"arXiv:1911.05722"},{"key":"ref34","article-title":"Improved baselines with momentum contrastive learning","author":"Chen","year":"2020","journal-title":"arXiv:2003.04297"},{"key":"ref35","article-title":"A simple framework for contrastive learning of visual representations","author":"Chen","year":"2020","journal-title":"arXiv:2002.05709"},{"key":"ref36","article-title":"Big self-supervised models are strong semi-supervised learners","author":"Chen","year":"2020","journal-title":"arXiv:2006.10029"},{"key":"ref37","article-title":"Masked autoencoders are scalable vision learners","author":"He","year":"2021","journal-title":"arXiv:2111.06377"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58621-8_7"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01513-4"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3165376"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3249162"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3371331"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICEACE60673.2023.10442431"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00864"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3301933"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3416880"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref48","first-page":"1","article-title":"An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Dosovitskiy"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref50","first-page":"1","article-title":"Deformable DETR: Deformable transformers for end-to-end object detection","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhu"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00271"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.330"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_28"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01219"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00103"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_9"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3272319"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3394534"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3323852"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3403497"},{"key":"ref61","first-page":"1","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. NIPS","author":"Krizhevsky"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02191"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00628"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00526"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093347"},{"key":"ref66","article-title":"Joint object detection and multi-object tracking with graph neural networks","author":"Wang","year":"2020","journal-title":"arXiv:2006.13164"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3207223"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01720"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-97-5979-8_9"},{"key":"ref70","article-title":"BoT-SORT: Robust associations multi-pedestrian tracking","author":"Aharon","year":"2022","journal-title":"arXiv:2206.14651"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00627"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00368"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00866"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00813"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2019.8813779"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01161"},{"key":"ref78","article-title":"YOLOX: Exceeding YOLO series in 2021","author":"Ge","year":"2021","journal-title":"arXiv:2107.08430"},{"key":"ref79","article-title":"StrongSORT: Make DeepSORT great again","author":"Du","year":"2022","journal-title":"arXiv:2202.13514"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01217"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1155\/2008\/246309"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-48881-3_2"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00634"},{"key":"ref84","article-title":"MAT: Motion-aware multi-object tracking","author":"Han","year":"2020","journal-title":"arXiv:2009.04794"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01068"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3263884"},{"key":"ref87","article-title":"Observation-centric SORT: Rethinking SORT for robust multi-object tracking","author":"Cao","year":"2022","journal-title":"arXiv:2203.14360"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2929520"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00248"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2020.104091"},{"key":"ref91","article-title":"RelationTrack: Relation-aware multiple object tracking with decoupled representation","author":"Yu","year":"2021","journal-title":"arXiv:2105.04322"},{"key":"ref92","article-title":"Tracklets predicting based adaptive graph tracking","author":"Shan","year":"2020","journal-title":"arXiv:2010.09015"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00387"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3364828"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.2996609"},{"key":"ref96","first-page":"1","article-title":"Multiple object tracking from appearance by hierarchically clustering tracklets","volume-title":"Proc. BMVC","author":"Girbau"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00857"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3119563"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01249-6_23"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/10989278\/10819455.pdf?arnumber=10819455","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,7]],"date-time":"2025-05-07T04:21:33Z","timestamp":1746591693000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10819455\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5]]},"references-count":99,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2024.3524670","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5]]}}}