{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T14:41:19Z","timestamp":1776177679690,"version":"3.50.1"},"reference-count":55,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,11,1]],"date-time":"2026-11-01T00:00:00Z","timestamp":1793491200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100013058","name":"Jiangsu Provincial Key Research and Development Program","doi-asserted-by":"publisher","award":["BE2022138"],"award-info":[{"award-number":["BE2022138"]}],"id":[{"id":"10.13039\/501100013058","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013804","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100013804","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072232"],"award-info":[{"award-number":["62072232"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62576098"],"award-info":[{"award-number":["62576098"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["021714380026"],"award-info":[{"award-number":["021714380026"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition"],"published-print":{"date-parts":[[2026,11]]},"DOI":"10.1016\/j.patcog.2026.113532","type":"journal-article","created":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T09:20:45Z","timestamp":1773825645000},"page":"113532","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"PA","title":["Learning frequency and memory-aware prompts for multi-modal object 
tracking"],"prefix":"10.1016","volume":"179","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-7697-9630","authenticated-orcid":false,"given":"Boyue","family":"Xu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8111-7339","authenticated-orcid":false,"given":"Ruichao","family":"Hou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3092-424X","authenticated-orcid":false,"given":"Tongwei","family":"Ren","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0139-9415","authenticated-orcid":false,"given":"Dongming","family":"Zhou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1391-1762","authenticated-orcid":false,"given":"Gangshan","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3133-7119","authenticated-orcid":false,"given":"Jinde","family":"Cao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patcog.2026.113532_bib0001","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","first-page":"19165","article-title":"Dynamic updates for language adaptation in visual\u2013language tracking","author":"Li","year":"2025"},{"key":"10.1016\/j.patcog.2026.113532_bib0002","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102492","article-title":"RGB-T tracking: a comprehensive review","volume":"110","author":"Feng","year":"2024","journal-title":"Inf. Fusion."},{"key":"10.1016\/j.patcog.2026.113532_bib0003","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"Visible-thermal UAV tracking: a large-scale benchmark and new baseline","author":"Zhang","year":"2022"},{"issue":"4","key":"10.1016\/j.patcog.2026.113532_bib0004","first-page":"1345","article-title":"Impedance learning-based adaptive force tracking for robot on unknown terrains","volume":"30","author":"Li","year":"2025","journal-title":"IEEE Trans. Robot."},{"issue":"4","key":"10.1016\/j.patcog.2026.113532_bib0005","doi-asserted-by":"crossref","first-page":"2125","DOI":"10.1109\/TCSVT.2023.3301933","article-title":"Towards unified token learning for vision\u2013language tracking","volume":"34","author":"Zheng","year":"2023","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.patcog.2026.113532_bib0006","doi-asserted-by":"crossref","first-page":"392","DOI":"10.1109\/TIP.2021.3130533","article-title":"LasHeR: a large-scale high-diversity benchmark for RGB-T tracking","volume":"31","author":"Li","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patcog.2026.113532_bib0007","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"RGBD1K: a large-scale dataset and benchmark for RGB-D object tracking","author":"Zhu","year":"2023"},{"issue":"3","key":"10.1016\/j.patcog.2026.113532_bib0008","doi-asserted-by":"crossref","first-page":"1997","DOI":"10.1109\/TCYB.2023.3318601","article-title":"VisEvent: reliable object tracking via collaboration of frame and event flows","volume":"54","author":"Wang","year":"2023","journal-title":"IEEE Trans. Cybern."},{"key":"10.1016\/j.patcog.2026.113532_bib0009","series-title":"Proc. IEEE Int. Conf. Multimedia Expo","article-title":"MTNet: learning modality-aware representation with transformer for RGB-T tracking","author":"Hou","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0010","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. 
Pattern Recognit.","article-title":"Bridging search region interaction with template for RGB-T tracking","author":"Hui","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0011","series-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","article-title":"DepthTrack: unveiling the power of RGB-D tracking","author":"Yan","year":"2021"},{"key":"10.1016\/j.patcog.2026.113532_bib0012","series-title":"Proc. IEEE Int. Conf. Multimedia Expo","first-page":"1","article-title":"MIRNet: a robust RGBT tracking jointly with multi-modal interaction and refinement","author":"Hou","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0013","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"SDSTrack: self-distillation symmetric adapter learning for multi-modal visual object tracking","author":"Hou","year":"2024"},{"key":"10.1016\/j.patcog.2026.113532_bib0014","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"Visual prompt multi-modal tracking","author":"Zhu","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0015","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"OneTracker: unifying visual object tracking with foundation models and efficient tuning","author":"Hong","year":"2024"},{"issue":"2","key":"10.1016\/j.patcog.2026.113532_bib0016","doi-asserted-by":"crossref","first-page":"1093","DOI":"10.1109\/TPAMI.2023.3330416","article-title":"Image restoration via frequency selection","volume":"46","author":"Cui","year":"2023","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patcog.2026.113532_bib0017","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2023.110043","article-title":"Frequency-aware feature aggregation network with dual-task consistency for RGB-T salient object detection","volume":"146","author":"Zhou","year":"2024","journal-title":"Pattern Recogn."},{"issue":"34","key":"10.1016\/j.patcog.2026.113532_bib0018","doi-asserted-by":"crossref","first-page":"24389","DOI":"10.1007\/s00521-023-09024-8","article-title":"Multiple frequency-spatial network for RGB-T tracking in the presence of motion blur","volume":"35","author":"Fan","year":"2023","journal-title":"Neural Comput. Appl."},{"issue":"9","key":"10.1016\/j.patcog.2026.113532_bib0019","doi-asserted-by":"crossref","first-page":"15517","DOI":"10.1109\/JSEN.2024.3370144","article-title":"Learning multi-frequency integration network for RGB-T tracking","volume":"24","author":"Mei","year":"2024","journal-title":"IEEE Sensors J."},{"key":"10.1016\/j.patcog.2026.113532_bib0020","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"TCTrack: Temporal contexts for aerial tracking","author":"Cao","year":"2022"},{"issue":"1","key":"10.1016\/j.patcog.2026.113532_bib0021","first-page":"360","article-title":"Visual tracking via dynamic memory networks","volume":"43","author":"Yang","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"3","key":"10.1016\/j.patcog.2026.113532_bib0022","first-page":"1224","article-title":"Aligned spatial\u2013temporal memory network for thermal infrared target tracking","volume":"70","author":"Yuan","year":"2023","journal-title":"IEEE Trans. Circuits Syst. II Express Briefs"},{"key":"10.1016\/j.patcog.2026.113532_bib0023","series-title":"Proc. Eur. Conf. Comput. 
Vis.","article-title":"XMem: long-term video object segmentation with an atkinson\u2013shiffrin memory model","author":"Cheng","year":"2022"},{"issue":"4","key":"10.1016\/j.patcog.2026.113532_bib0024","doi-asserted-by":"crossref","first-page":"155","DOI":"10.5214\/ans.0972.7531.200408","article-title":"Memory: a contribution to experimental psychology","volume":"20","author":"Ebbinghaus","year":"2013","journal-title":"Ann. Neurosci."},{"key":"10.1016\/j.patcog.2026.113532_bib0025","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"ArkitTrack: a new diverse dataset for tracking using mobile RGB-D data","author":"Zhao","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0026","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"Attribute-based progressive fusion network for RGB-T tracking","author":"Xiao","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0027","series-title":"Proc. AAAI Conf. Artif. Intell.","first-page":"2239","article-title":"SuTrack: towards simple and unified single object tracking","volume":"39","author":"Chen","year":"2025"},{"key":"10.1016\/j.patcog.2026.113532_bib0028","series-title":"Proc. IEEE Int. Conf. Comput. Vis.","article-title":"Learning spatio\u2013temporal transformer for visual tracking","author":"Yan","year":"2021"},{"key":"10.1016\/j.patcog.2026.113532_bib0029","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"SeqTrack: sequence-to-sequence learning for visual object tracking","author":"Chen","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0030","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"ODTrack: online dense temporal token learning for visual tracking","author":"Zheng","year":"2024"},{"key":"10.1016\/j.patcog.2026.113532_bib0031","unstructured":"X. Chen, B. Kang, J. Zhu, D. Wang, H. Peng, H. Lu, Unified Sequence-to-Sequence Learning for Single- and Multi-Modal Visual Object Tracking, (2024). arXiv: 2404.00000."},{"key":"10.1016\/j.patcog.2026.113532_bib0032","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"15180","article-title":"Lighttrack: finding lightweight neural networks for object tracking via one-shot architecture search","author":"Yan","year":"2021"},{"key":"10.1016\/j.patcog.2026.113532_bib0033","series-title":"Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision","first-page":"1571","article-title":"Efficient visual tracking with exemplar transformers","author":"Blatter","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0034","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"9612","article-title":"Exploring lightweight hierarchical vision transformers for efficient visual tracking","author":"Kang","year":"2023"},{"key":"10.1016\/j.patcog.2026.113532_bib0035","series-title":"European Conference on Computer Vision","first-page":"644","article-title":"FEAR: Fast, efficient, accurate and robust visual tracker","author":"Borsuk","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0036","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2019.106977","article-title":"RGB-T object tracking: benchmark and baseline","volume":"96","author":"Li","year":"2019","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113532_bib0037","series-title":"Proc. Eur. Conf. Comput. 
Vis.","article-title":"The tenth visual object tracking VOT2022 challenge results","author":"Kristan","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0038","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"Generative-based fusion mechanism for multi-modal tracking","author":"Tang","year":"2024"},{"issue":"11","key":"10.1016\/j.patcog.2026.113532_bib0039","doi-asserted-by":"crossref","first-page":"12059","DOI":"10.1109\/TCSVT.2024.3425455","article-title":"Transformer RGB-T tracking with spatio-temporal multimodal tokens","volume":"34","author":"Sun","year":"2024","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.patcog.2026.113532_bib0040","article-title":"RGB-T tracking via challenge-based appearance disentanglement and interaction","author":"Liu","year":"2024","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patcog.2026.113532_bib0041","series-title":"Proc. AAAI Conf. Artif. Intell.","first-page":"8682","article-title":"Cross-modulated attention transformer for RGB-T tracking","volume":"39","author":"Xiao","year":"2025"},{"key":"10.1016\/j.patcog.2026.113532_bib0042","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.110984","article-title":"UniRTL: a universal RGBT and low-light benchmark for object tracking","volume":"158","author":"Zhang","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113532_bib0043","article-title":"RGBT tracking via supervised mutual guiding","author":"Liu","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113532_bib0044","series-title":"Proc. ACM Int. Conf. Multimedia","article-title":"Prompting for multi-modal tracking","author":"Yang","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0045","series-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","article-title":"Single-model and any-Modality for video object tracking","author":"Wu","year":"2024"},{"key":"10.1016\/j.patcog.2026.113532_bib0046","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"Temporal adaptive RGB-T tracking with modality prompt","author":"Wang","year":"2024"},{"key":"10.1016\/j.patcog.2026.113532_bib0047","series-title":"Proc. AAAI Conf. Artif. Intell.","article-title":"Bi-Directional adapter for multimodal tracking","author":"Cao","year":"2024"},{"issue":"5","key":"10.1016\/j.patcog.2026.113532_bib0048","doi-asserted-by":"crossref","first-page":"2599","DOI":"10.1007\/s11263-024-02311-4","article-title":"Modality-Missing RGB-T tracking: invertible prompt learning and high-quality benchmarks","volume":"133","author":"Lu","year":"2025","journal-title":"Int. J. Comput. Vis."},{"issue":"7","key":"10.1016\/j.patcog.2026.113532_bib0049","doi-asserted-by":"crossref","first-page":"5847","DOI":"10.1109\/TPAMI.2025.3555485","article-title":"Cross-modality distillation for multi-modal tracking","volume":"47","author":"Zhang","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patcog.2026.113532_bib0050","series-title":"Proc. Eur. Conf. Comput. 
Vis.","article-title":"Joint feature learning and relation modeling for tracking: a one-Stream framework","author":"Ye","year":"2022"},{"key":"10.1016\/j.patcog.2026.113532_bib0051","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.111053","article-title":"Temporal adaptive bidirectional bridging for RGB-D tracking","volume":"158","author":"Ying","year":"2025","journal-title":"Pattern Recogn."},{"key":"10.1016\/j.patcog.2026.113532_bib0052","series-title":"Proc. IEEE Int. Conf. Comput. Vis.","article-title":"Learning discriminative model prediction for tracking","author":"Bhat","year":"2019"},{"issue":"20","key":"10.1016\/j.patcog.2026.113532_bib0053","doi-asserted-by":"crossref","first-page":"23564","DOI":"10.1007\/s10489-023-04763-6","article-title":"SwinEFT: a robust and powerful swin transformer based event frame tracker","volume":"53","author":"Zeng","year":"2023","journal-title":"Appl. Intell."},{"key":"10.1016\/j.patcog.2026.113532_bib0054","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2024.106493","article-title":"Reliable object tracking by multimodal hybrid feature extraction and transformer-based fusion","volume":"178","author":"Sun","year":"2024","journal-title":"Neural Networks"},{"key":"10.1016\/j.patcog.2026.113532_bib0055","article-title":"Revisiting color-event based tracking: a unified network, dataset, and metric","author":"Tang","year":"2025","journal-title":"Pattern Recognit."}],"container-title":["Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S003132032600498X?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S003132032600498X?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T13:42:14Z","timestamp":1776174134000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S003132032600498X"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,11]]},"references-count":55,"alternative-id":["S003132032600498X"],"URL":"https:\/\/doi.org\/10.1016\/j.patcog.2026.113532","relation":{},"ISSN":["0031-3203"],"issn-type":[{"value":"0031-3203","type":"print"}],"subject":[],"published":{"date-parts":[[2026,11]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Learning frequency and memory-aware prompts for multi-modal object tracking","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patcog.2026.113532","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"113532"}}