{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,11]],"date-time":"2026-04-11T18:39:33Z","timestamp":1775932773018,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2023,9,12]],"date-time":"2023-09-12T00:00:00Z","timestamp":1694476800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,9,12]],"date-time":"2023-09-12T00:00:00Z","timestamp":1694476800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Natural Science Research Project of Anhui Education Department","award":["KJ2019A0005"],"award-info":[{"award-number":["KJ2019A0005"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62076003"],"award-info":[{"award-number":["62076003"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-023-16418-2","type":"journal-article","created":{"date-parts":[[2023,9,12]],"date-time":"2023-09-12T02:01:37Z","timestamp":1694484097000},"page":"29311-29330","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["RGBT Tracking based on modality feature 
enhancement"],"prefix":"10.1007","volume":"83","author":[{"given":"Sulan","family":"Zhai","sequence":"first","affiliation":[]},{"given":"Yi","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Lei","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jin","family":"Tang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,12]]},"reference":[{"key":"16418_CR1","unstructured":"Cai B, Zhang C, Zhixin LI (2017) Tracking Infrared-visible Target with Joint Histogram. J of Guang xi Normal University"},{"key":"16418_CR2","doi-asserted-by":"crossref","unstructured":"Feng M, Song K, Wang Y, Liu J, Yan Y (2020) Learning Discriminative Update Adaptive Spatial-Temporal Regularized Correlation Filter for RGB-T Tracking. J Vis Commun Image Represent, 72:102881","DOI":"10.1016\/j.jvcir.2020.102881"},{"key":"16418_CR3","doi-asserted-by":"crossref","unstructured":"Gao Y, Li CL, Zhu YB, Tang J, He T, Wang FT (2019) Deep adaptive fusion network for high performance rgbt tracking. In: Proc IEEE Int Conf Comput Vis Workshops","DOI":"10.1109\/ICCVW.2019.00017"},{"key":"16418_CR4","doi-asserted-by":"crossref","unstructured":"He KM, Zhang XY, Ren SQ, Sun J (2016) Deep Residual Learning for Image Recognition. In: Proc IEEE Conf Comput Vis Pattern Recognit, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"16418_CR5","doi-asserted-by":"publisher","first-page":"788","DOI":"10.1109\/TIP.2021.3132827","volume":"31","author":"MJ He","year":"2022","unstructured":"He MJ, Zhang J, Shan SG, Liu X, Wu ZQ et al (2022) Locality-Aware Channel-Wise Dropout for Occluded Face Recognition. IEEE Trans Image Process, 31:788\u2013798","journal-title":"IEEE Trans Image Process,"},{"key":"16418_CR6","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. 
In: Proc IEEE Conf Comput Vis Pattern Recognit, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"16418_CR7","doi-asserted-by":"crossref","unstructured":"Kroeger T, Timofte RD, Dai DX, Van Gool L (2016) Fast optical flow using dense inverse search. In: Proceedings of the Computer Vision-ECCV: 14th European Conference, pp 471-488","DOI":"10.1007\/978-3-319-46493-0_29"},{"key":"16418_CR8","doi-asserted-by":"crossref","unstructured":"Laurense VA, Goh JY, Gerdes JC (2017) Path-tracking for autonomous vehicles at the limit of friction. Am Control Conf, p 5586\u20135 591","DOI":"10.23919\/ACC.2017.7963824"},{"key":"16418_CR9","doi-asserted-by":"crossref","unstructured":"Li CL, Liang XY, Lu YJ, Zhao N, Tang J (2019) RGB-T object tracking: Benchmark and baseline. Pattern Recognit, 96:106977","DOI":"10.1016\/j.patcog.2019.106977"},{"key":"16418_CR10","doi-asserted-by":"crossref","unstructured":"Li CL, Liu L, Lu AD, Ji Q, Tang J (2020) Challenge-aware rgbt tracking. In: Proceedings of the Computer Vision-ECCV: 16th European Conference, pp 222-237","DOI":"10.1007\/978-3-030-58542-6_14"},{"key":"16418_CR11","doi-asserted-by":"publisher","unstructured":"Li CL, Lu AD, Zheng AH, Tu ZZ, Tang J (2019) Multi-Adapter RGBT Tracking. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops. https:\/\/doi.org\/10.1109\/ICCVW.2019.00279","DOI":"10.1109\/ICCVW.2019.00279"},{"key":"16418_CR12","doi-asserted-by":"publisher","unstructured":"Li CL, Xue WL, Jia YQ, Qu ZC, Luo B, Tang J, et al (2021) LasHeR: A Large-scale High-diversity Benchmark for RGBT Tracking. IEEE Trans Image Process. 
https:\/\/doi.org\/10.48550\/arXiv.2104.13202","DOI":"10.48550\/arXiv.2104.13202"},{"key":"16418_CR13","doi-asserted-by":"publisher","first-page":"5743","DOI":"10.1109\/TIP.2016.2614135","volume":"25","author":"CL Li","year":"2016","unstructured":"Li CL, Cheng H, Hu SY, Liu XB, Tang J, Lin L (2016) Learning collaborative sparse representation for grayscale-thermal tracking. IEEE Trans Image Process, 25:5743\u20135756","journal-title":"IEEE Trans Image Process,"},{"key":"16418_CR14","first-page":"1","volume":"10","author":"J Li","year":"2017","unstructured":"Li J, Zhang Z, He H (2017) Hierarchical convolutional neural networks for EEG-based emotion recognition. Cogn Comput, 10:1\u201313","journal-title":"Cogn Comput,"},{"key":"16418_CR15","doi-asserted-by":"crossref","unstructured":"Li X, Wang W, Hu X, et al (2019) Selective kernel networks. In: Proc IEEE\/CVF Conf Comput Vis Pattern Recognit. pp 510-519","DOI":"10.1109\/CVPR.2019.00060"},{"key":"16418_CR16","doi-asserted-by":"crossref","unstructured":"Li B, Yan JJ, Wu W, Zhu Z, Hu XL (2018) High performance visual tracking with siamese region proposal network. In: Proc IEEE Conf Comput Vis Pattern Recognit, pp 8971\u20138980","DOI":"10.1109\/CVPR.2018.00935"},{"key":"16418_CR17","doi-asserted-by":"crossref","unstructured":"Lu AD, Li CL, Yan YQ, Tang J, Luo B (2021) RGBT Tracking via Multi-Adapter Network with Hierarchical Divergence Loss. IEEE Trans Image Process, pp 5613\u20135625","DOI":"10.1109\/TIP.2021.3087341"},{"key":"16418_CR18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3157594","author":"AD Lu","year":"2020","unstructured":"Lu AD, Qian C, Li CL, Tang J, Wang L (2020). Duality-Gated Mutual Condition Network for RGBT Tracking. 
https:\/\/doi.org\/10.1109\/TNNLS.2022.3157594","journal-title":"Duality-Gated Mutual Condition Network for RGBT Tracking."},{"key":"16418_CR19","doi-asserted-by":"crossref","unstructured":"Marriott RT, Romdhani S, Chen L (2021) A 3D GAN for Improved Large-pose Facial Recognition. In:Proc IEEE Conf Comput Vis Pattern Recognit, pp 13445\u201313455","DOI":"10.1109\/CVPR46437.2021.01324"},{"key":"16418_CR20","doi-asserted-by":"crossref","unstructured":"Nam H, Han B (2016) Learning multi-domain convolutional neural networks for visual tracking. In: Proc IEEE Conf Comput Vis Pattern Recognit, pp 4293\u20134302","DOI":"10.1109\/CVPR.2016.465"},{"key":"16418_CR21","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. Comput Vis Pattern Recognit. https:\/\/doi.org\/10.48550\/arXiv.1409.1556"},{"key":"16418_CR22","doi-asserted-by":"crossref","unstructured":"Sun P, Zhang RF, Jiang Y, Kong T, Xu CF, Zhan W, et al (2021) Sparse R-CNN: End-to-End Object Detection with Learnable Proposals. In:Proc IEEE Conf Comput Vis Pattern Recognit, p 14449\u201314458","DOI":"10.1109\/CVPR46437.2021.01422"},{"key":"16418_CR23","doi-asserted-by":"publisher","unstructured":"Tu ZZ, Chun L, Li CL, Tang J, Luo B (2020) M5L: Multi-Modal Multi-Margin Metric Learning for RGBT Tracking. https:\/\/doi.org\/10.48550\/arXiv.2003.07650","DOI":"10.48550\/arXiv.2003.07650"},{"key":"16418_CR24","doi-asserted-by":"crossref","unstructured":"Wang CQ, Xu CY, Cui Z, Zhou L, Zhang T, Zhang XY, et al (2020) Cross-modal pattern-propagation for RGB-T tracking. In: Proc IEEE Conf Comput Vis Pattern Recognit, pp 7064\u20137073","DOI":"10.1109\/CVPR42600.2020.00709"},{"key":"16418_CR25","first-page":"149","volume":"27","author":"K Wang","year":"2018","unstructured":"Wang K, Wei HL, Chen CB, Cao K (2018) Target Tracking Based on Infrared and Visible Light Fusion. 
Comput Syst Appl, 27:149\u2013153","journal-title":"Comput Syst Appl,"},{"key":"16418_CR26","doi-asserted-by":"publisher","first-page":"7831","DOI":"10.1109\/TITS.2021.3073046","volume":"23","author":"Y Wang","year":"2021","unstructured":"Wang Y, Wei X, Tang X, Shen H, Zhang H (2021) Adaptive Fusion CNN Features for RGBT Object Tracking. IEEE Trans Intell Transp Syst, 23:7831\u20137840","journal-title":"IEEE Trans Intell Transp Syst,"},{"key":"16418_CR27","doi-asserted-by":"crossref","unstructured":"Wang Q, Wu B, Zhu P, et al (2020) ECA-Net: Efficient channel attention for deep convolutional neural networks. In: Proc IEEE\/CVF Conf Comput Vis Pattern Recognit, p 11534-11542","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"16418_CR28","first-page":"2831","volume":"36","author":"Y Xiao","year":"2022","unstructured":"Xiao Y, Yang MM, Li CL, Liu L, Tang J (2022) Attribute-based Progressive Fusion Network for RGBT Tracking. Proc AAAI Conf Artif Intell, 36:2831\u20132838","journal-title":"Proc AAAI Conf Artif Intell,"},{"key":"16418_CR29","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1016\/j.neucom.2019.01.022","volume":"334","author":"S Zhai","year":"2019","unstructured":"Zhai S, Shao P, Liang X, Wang X (2019) Fast RGB-T Tracking via Cross-Modal Correlation Filters. Neurocomputing 334:172\u2013181","journal-title":"Neurocomputing"},{"key":"16418_CR30","doi-asserted-by":"crossref","unstructured":"Zhang LC, Danelljan M, Gonzalez-Garcia A, van de Weijer J, Shah-baz Khan F (2019) Multi-modal fusion for end-to-end rgb-t tracking. In: Proc IEEE Conf Comput Vis Workshops","DOI":"10.1109\/ICCVW.2019.00278"},{"key":"16418_CR31","doi-asserted-by":"crossref","unstructured":"Zhang PY, Wang D, Lu H, Yang X (2021) Learning Adaptive Attribute-Driven Representation for Real-Time RGB-T Tracking. 
Int J Comput Vis, pp 1\u201316","DOI":"10.1007\/s11263-021-01495-3"},{"key":"16418_CR32","doi-asserted-by":"publisher","first-page":"393","DOI":"10.3390\/s20020393","volume":"20","author":"H Zhang","year":"2020","unstructured":"Zhang H, Zhang L, Zhuo L, Zhang J (2020) Object tracking in rgb-t videos using modal-aware attention network and competitive learning. Sensors 20:393","journal-title":"Sensors"},{"key":"16418_CR33","doi-asserted-by":"publisher","first-page":"3335","DOI":"10.1109\/TIP.2021.3060862","volume":"30","author":"PY Zhang","year":"2021","unstructured":"Zhang PY, Zhao J, Bo CJ, Wang D, Lu HC, Yang XY (2021) Jointly modeling motion and appearance cues for robust RGB-T tracking. IEEE Trans Image Process, 30:3335\u20133347","journal-title":"IEEE Trans Image Process,"},{"key":"16418_CR34","doi-asserted-by":"crossref","unstructured":"Zhu YB, Li CL, Luo B, Tang J, Wang X (2019) Dense feature aggregation and pruning for rgbt tracking. In: Proceedings of the 27th ACM International Conference on Multimedia, pp 465\u2013472","DOI":"10.1145\/3343031.3350928"},{"key":"16418_CR35","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1109\/TIV.2020.2980735","volume":"6","author":"YB Zhu","year":"2021","unstructured":"Zhu YB, Li CL, Tang J, Luo B (2021) Quality-Aware Feature Aggregation Network for Robust RGBT Tracking. IEEE Trans Intell Veh, 6:121\u2013130","journal-title":"IEEE Trans Intell Veh,"},{"key":"16418_CR36","doi-asserted-by":"publisher","first-page":"579","DOI":"10.1109\/TCSVT.2021.3067997","volume":"32","author":"YB Zhu","year":"2021","unstructured":"Zhu YB, Li CL, Tang J, Luo B, Wang L (2021) RGBT Tracking by Trident Fusion Network. IEEE Trans Circ Syst Video Technol, 32:579\u2013592","journal-title":"IEEE Trans Circ Syst Video Technol,"},{"key":"16418_CR37","doi-asserted-by":"crossref","unstructured":"Zhu G, Porikli F, Li HD (2016) Beyond local search: Tracking objects everywhere with instance-specific proposals. 
In:Proc IEEE Conf Comput Vis Pattern Recognit, pp 943\u2013951","DOI":"10.1109\/CVPR.2016.108"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-16418-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-16418-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-16418-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T08:24:39Z","timestamp":1709799879000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-16418-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,12]]},"references-count":37,"journal-issue":{"issue":"10","published-online":{"date-parts":[[2024,3]]}},"alternative-id":["16418"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-16418-2","relation":{},"ISSN":["1573-7721"],"issn-type":[{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,9,12]]},"assertion":[{"value":"8 July 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 July 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 July 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 September 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This article 
does not contain any studies with animals performed by any of the authors.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics Approval and Consent to Participate"}},{"value":"The authors declare that there is no conflict of interests regarding the publication of this paper.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}]}}