{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T05:19:48Z","timestamp":1775107188165,"version":"3.50.1"},"reference-count":59,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100013058","name":"Jiangsu Provincial Key Research and Development Program","doi-asserted-by":"publisher","award":["BE2022138"],"award-info":[{"award-number":["BE2022138"]}],"id":[{"id":"10.13039\/501100013058","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central 
Universities","doi-asserted-by":"publisher","award":["021714380026"],"award-info":[{"award-number":["021714380026"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013804","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100013804","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072232"],"award-info":[{"award-number":["62072232"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100011246","name":"State Key Laboratory of Novel Software Technology","doi-asserted-by":"publisher","award":["ZZKT2024B20"],"award-info":[{"award-number":["ZZKT2024B20"]}],"id":[{"id":"10.13039\/501100011246","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Image and Vision Computing"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.imavis.2026.105929","type":"journal-article","created":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T16:45:28Z","timestamp":1770828328000},"page":"105929","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["STIFormer: RGB-T tracking via Spatial\u2013Temporal Interaction 
Transformer"],"prefix":"10.1016","volume":"168","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-7697-9630","authenticated-orcid":false,"given":"Boyue","family":"Xu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0001-9065-968X","authenticated-orcid":false,"given":"Yaqun","family":"Fang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8111-7339","authenticated-orcid":false,"given":"Ruichao","family":"Hou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3092-424X","authenticated-orcid":false,"given":"Tongwei","family":"Ren","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"4","key":"10.1016\/j.imavis.2026.105929_b1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3391743","article-title":"Video object segmentation and tracking: A survey","volume":"11","author":"Yao","year":"2020","journal-title":"ACM Trans. Intell. Syst. Technol."},{"key":"10.1016\/j.imavis.2026.105929_b2","unstructured":"T.-X. Xu, Y.-C. Guo, Y.-K. Lai, S.-H. Zhang, CXTrack: improving 3D point cloud tracking with contextual information, in: IEEE Conference on Computer Vision and Pattern Recognition, 2023."},{"key":"10.1016\/j.imavis.2026.105929_b3","doi-asserted-by":"crossref","unstructured":"H. Ye, J. Zhao, Y. Pan, W. Cherr, L. He, H. Zhang, Robot person following under partial occlusion, in: IEEE International Conference on Robotics and Automation, 2023.","DOI":"10.1109\/ICRA48891.2023.10160738"},{"key":"10.1016\/j.imavis.2026.105929_b4","series-title":"Empowering embodied visual tracking with visual foundation models and offline RL","author":"Zhong","year":"2024"},{"key":"10.1016\/j.imavis.2026.105929_b5","doi-asserted-by":"crossref","unstructured":"L. Zhou, Z. Zhou, K. Mao, Z. 
He, Joint visual grounding and tracking with natural language specification, in: IEEE Conference on Computer Vision and Pattern Recognition, 2023.","DOI":"10.1109\/CVPR52729.2023.02217"},{"key":"10.1016\/j.imavis.2026.105929_b6","first-page":"1","article-title":"Jointly modeling association and motion cues for robust infrared uav tracking","author":"Xu","year":"2024","journal-title":"Vis. Comput."},{"key":"10.1016\/j.imavis.2026.105929_b7","doi-asserted-by":"crossref","unstructured":"B. Xu, R. Hou, T. Ren, G. Wu, RGB-D Video Object Segmentation via Enhanced Multi-store Feature Memory, in: Proceedings of the International Conference on Multimedia Retrieval, 2024, pp. 1016\u20131024.","DOI":"10.1145\/3652583.3658036"},{"issue":"11","key":"10.1016\/j.imavis.2026.105929_b8","doi-asserted-by":"crossref","first-page":"775","DOI":"10.1007\/s10489-025-06658-0","article-title":"X modality assisting RGBT object tracking","volume":"55","author":"Ding","year":"2025","journal-title":"Appl. Intell."},{"key":"10.1016\/j.imavis.2026.105929_b9","doi-asserted-by":"crossref","DOI":"10.1109\/LSP.2023.3316021","article-title":"Region selective fusion network for robust rgb-t tracking","author":"Yu","year":"2023","journal-title":"IEEE Signal Process. Lett."},{"key":"10.1016\/j.imavis.2026.105929_b10","article-title":"Dynamic feature-memory transformer network for RGBT tracking","author":"Li","year":"2023","journal-title":"IEEE Sensors J."},{"key":"10.1016\/j.imavis.2026.105929_b11","first-page":"1","article-title":"Rgbt image fusion tracking via sparse trifurcate transformer aggregation network","volume":"73","author":"Feng","year":"2024","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.imavis.2026.105929_b12","article-title":"Rgbt tracking via progressive fusion transformer with dynamically guided learning","author":"Zhu","year":"2024","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"key":"10.1016\/j.imavis.2026.105929_b13","doi-asserted-by":"crossref","unstructured":"R. Hou, B. Xu, T. Ren, G. Wu, MTNet: learning modality-aware representation with transformer for RGBT tracking, in: IEEE International Conference on Multimedia and Expo, 2023, pp. 1163\u20131168.","DOI":"10.1109\/ICME55011.2023.00203"},{"key":"10.1016\/j.imavis.2026.105929_b14","doi-asserted-by":"crossref","unstructured":"H. Wang, X. Liu, Y. Li, M. Sun, D. Yuan, J. Liu, Temporal adaptive rgbt tracking with modality prompt, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2024.","DOI":"10.1609\/aaai.v38i6.28352"},{"key":"10.1016\/j.imavis.2026.105929_b15","doi-asserted-by":"crossref","unstructured":"X. Chen, H. Peng, D. Wang, H. Lu, H. Hu, Seqtrack: Sequence to sequence learning for visual object tracking, in: IEEE Conference on Computer Vision and Pattern Recognition, 2023, pp. 14572\u201314581.","DOI":"10.1109\/CVPR52729.2023.01400"},{"key":"10.1016\/j.imavis.2026.105929_b16","series-title":"Association for the Advancement of Artificial Intelligence","article-title":"Odtrack: Online dense temporal token learning for visual tracking","author":"Zheng","year":"2024"},{"key":"10.1016\/j.imavis.2026.105929_b17","doi-asserted-by":"crossref","unstructured":"I. Jung, J. Son, M. Baek, B. Han, Real-time mdnet, in: Proceedings of the European Conference on Computer Vision, 2018, pp. 83\u201398.","DOI":"10.1007\/978-3-030-01225-0_6"},{"key":"10.1016\/j.imavis.2026.105929_b18","doi-asserted-by":"crossref","unstructured":"H. Nam, B. Han, Learning multi-domain convolutional neural networks for visual tracking, in: IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 
4293\u20134302.","DOI":"10.1109\/CVPR.2016.465"},{"key":"10.1016\/j.imavis.2026.105929_b19","first-page":"1","article-title":"Duality-gated mutual condition network for RGBT tracking","author":"Lu","year":"2022","journal-title":"IEEE Trans. Neural Networks Learn. Syst."},{"key":"10.1016\/j.imavis.2026.105929_b20","doi-asserted-by":"crossref","unstructured":"R. Hou, T. Ren, G. Wu, Mirnet: A robust rgbt tracking jointly with multi-modal interaction and refinement, in: IEEE International Conference on Multimedia and Expo, 2022, pp. 1\u20136.","DOI":"10.1109\/ICME52920.2022.9860018"},{"key":"10.1016\/j.imavis.2026.105929_b21","series-title":"Association for the Advancement of Artificial Intelligence","first-page":"2831","article-title":"Attribute-based progressive fusion network for rgbt tracking","author":"Xiao","year":"2022"},{"key":"10.1016\/j.imavis.2026.105929_b22","article-title":"RGBT tracking via challenge-based appearance disentanglement and interaction","author":"Liu","year":"2024","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.imavis.2026.105929_b23","doi-asserted-by":"crossref","unstructured":"L. Bertinetto, J. Valmadre, J.F. Henriques, A. Vedaldi, P.H. Torr, Fully-convolutional siamese networks for object tracking, in: European Conference on Computer Vision, 2016, pp. 850\u2013865.","DOI":"10.1007\/978-3-319-48881-3_56"},{"issue":"4","key":"10.1016\/j.imavis.2026.105929_b24","first-page":"1","article-title":"Robust rgb-t tracking via adaptive modality weight correlation filters and cross-modality learning","volume":"20","author":"Zhou","year":"2023","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"issue":"9","key":"10.1016\/j.imavis.2026.105929_b25","doi-asserted-by":"crossref","first-page":"3281","DOI":"10.1007\/s13042-023-01833-6","article-title":"Siamese infrared and visible light fusion network for rgb-t tracking","volume":"14","author":"Peng","year":"2023","journal-title":"Int. J. Mach. Learn. 
Cybern."},{"key":"10.1016\/j.imavis.2026.105929_b26","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.108945","article-title":"Learning reliable modal weight with transformer for robust rgbt tracking","volume":"249","author":"Feng","year":"2022","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.imavis.2026.105929_b27","doi-asserted-by":"crossref","first-page":"3378","DOI":"10.1109\/TMM.2023.3310295","article-title":"Learning multi-layer attention aggregation siamese network for robust RGBT tracking","volume":"26","author":"Feng","year":"2024","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.imavis.2026.105929_b28","doi-asserted-by":"crossref","first-page":"50","DOI":"10.1109\/TMM.2021.3120873","article-title":"EAPT: efficient attention pyramid transformer for image processing","volume":"25","author":"Lin","year":"2021","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.imavis.2026.105929_b29","series-title":"Advances in Neural Information Processing Systems","article-title":"Attention is all you need","author":"Vaswani","year":"2017"},{"key":"10.1016\/j.imavis.2026.105929_b30","doi-asserted-by":"crossref","unstructured":"T. Meinhardt, A. Kirillov, L. Leal-Taixe, C. Feichtenhofer, Trackformer: Multi-object tracking with transformers, in: IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 8844\u20138854.","DOI":"10.1109\/CVPR52688.2022.00864"},{"key":"10.1016\/j.imavis.2026.105929_b31","series-title":"Advances in Neural Information Processing Systems","first-page":"16743","article-title":"Swintrack: A simple and strong baseline for transformer tracking","author":"Lin","year":"2022"},{"key":"10.1016\/j.imavis.2026.105929_b32","series-title":"Advances in Neural Information Processing Systems","article-title":"Mixformerv2: Efficient fully transformer tracking","author":"Cui","year":"2024"},{"key":"10.1016\/j.imavis.2026.105929_b33","doi-asserted-by":"crossref","unstructured":"Y. Cui, C. Jiang, L. Wang, G. 
Wu, Mixformer: End-to-end tracking with iterative mixed attention, in: IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 13608\u201313618.","DOI":"10.1109\/CVPR52688.2022.01324"},{"key":"10.1016\/j.imavis.2026.105929_b34","doi-asserted-by":"crossref","unstructured":"B. Ye, H. Chang, B. Ma, S. Shan, X. Chen, Joint feature learning and relation modeling for tracking: A one-stream framework, in: European Conference on Computer Vision, 2022.","DOI":"10.1007\/978-3-031-20047-2_20"},{"key":"10.1016\/j.imavis.2026.105929_b35","doi-asserted-by":"crossref","unstructured":"B. Yan, H. Peng, J. Fu, D. Wang, H. Lu, Learning Spatio\u2013Temporal Transformer for Visual Tracking, in: Proceedings of IEEE International Conference on Computer Vision, 2021.","DOI":"10.1109\/ICCV48922.2021.01028"},{"issue":"1","key":"10.1016\/j.imavis.2026.105929_b36","first-page":"360","article-title":"Visual tracking via dynamic memory networks","volume":"43","author":"Yang","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.imavis.2026.105929_b37","doi-asserted-by":"crossref","unstructured":"Z. Cao, Z. Huang, L. Pan, S. Zhang, Z. Liu, C. Fu, TCTrack: Temporal Contexts for Aerial Tracking, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022.","DOI":"10.1109\/CVPR52688.2022.01438"},{"issue":"3","key":"10.1016\/j.imavis.2026.105929_b38","first-page":"1224","article-title":"Aligned spatial\u2013temporal memory network for thermal infrared target tracking","volume":"70","author":"Yuan","year":"2023","journal-title":"IEEE Trans. Circuits Syst."},{"key":"10.1016\/j.imavis.2026.105929_b39","doi-asserted-by":"crossref","unstructured":"X. Chen, H. Peng, D. Wang, H. Lu, H. 
Hu, SeqTrack: Sequence-to-Sequence Learning for Visual Object Tracking, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023.","DOI":"10.1109\/CVPR52729.2023.01400"},{"key":"10.1016\/j.imavis.2026.105929_b40","series-title":"Unified sequence-to-sequence learning for single- and multi-modal visual object tracking","author":"Chen","year":"2024"},{"key":"10.1016\/j.imavis.2026.105929_b41","doi-asserted-by":"crossref","unstructured":"X. Hou, J. Xing, Y. Qian, Y. Guo, S. Xin, J. Chen, K. Tang, M. Wang, Z. Jiang, L. Liu, et al., Sdstrack: Self-Distillation Symmetric Adapter Learning for Multi-Modal Visual Object Tracking, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024.","DOI":"10.1109\/CVPR52733.2024.02507"},{"key":"10.1016\/j.imavis.2026.105929_b42","doi-asserted-by":"crossref","first-page":"4303","DOI":"10.1109\/TIP.2024.3428316","article-title":"Exploring multi-modal spatial\u2013temporal contexts for high-performance RGB-t tracking","volume":"33","author":"Zhang","year":"2024","journal-title":"IEEE Trans. Image Process."},{"issue":"13","key":"10.1016\/j.imavis.2026.105929_b43","doi-asserted-by":"crossref","first-page":"25386","DOI":"10.1109\/JSEN.2025.3575188","article-title":"Transformer-based RGBT tracking with spatio-temporal information fusion","volume":"25","author":"Yuan","year":"2025","journal-title":"IEEE Sensors J."},{"issue":"11","key":"10.1016\/j.imavis.2026.105929_b44","doi-asserted-by":"crossref","first-page":"12059","DOI":"10.1109\/TCSVT.2024.3425455","article-title":"Transformer rgb-t tracking with spatio-temporal multimodal tokens","volume":"34","author":"Sun","year":"2024","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"key":"10.1016\/j.imavis.2026.105929_b45","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2019.106977","article-title":"RGB-t object tracking: Benchmark and baseline","volume":"96","author":"Li","year":"2019","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.imavis.2026.105929_b46","doi-asserted-by":"crossref","unstructured":"T. Hui, Z. Xun, F. Peng, J. Huang, X. Wei, X. Wei, J. Dai, J. Han, S. Liu, Bridging Search Region Interaction with Template for RGB-T Tracking, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023.","DOI":"10.1109\/CVPR52729.2023.01310"},{"key":"10.1016\/j.imavis.2026.105929_b47","doi-asserted-by":"crossref","unstructured":"J. Yang, Z. Li, F. Zheng, A. Leonardis, J. Song, Prompting for Multi-Modal Tracking, in: Proceedings of the ACM International Conference on Multimedia, 2022.","DOI":"10.1145\/3503161.3547851"},{"key":"10.1016\/j.imavis.2026.105929_b48","doi-asserted-by":"crossref","unstructured":"J. Zhu, S. Lai, X. Chen, D. Wang, H. Lu, Visual prompt multi-modal tracking, in: IEEE Conference on Computer Vision and Pattern Recognition, 2023, pp. 9516\u20139526.","DOI":"10.1109\/CVPR52729.2023.00918"},{"key":"10.1016\/j.imavis.2026.105929_b49","doi-asserted-by":"crossref","unstructured":"Z. Tang, T. Xu, X. Wu, X.-F. Zhu, J. Kittler, Generative-Based Fusion Mechanism for Multi-Modal Tracking, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2024.","DOI":"10.1609\/aaai.v38i6.28325"},{"key":"10.1016\/j.imavis.2026.105929_b50","doi-asserted-by":"crossref","unstructured":"Z. Wu, J. Zheng, X. Ren, F.-A. Vasluianu, C. Ma, D.P. Paudel, L. Van Gool, R. Timofte, Single-Model and Any-Modality for Video Object Tracking, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024.","DOI":"10.1109\/CVPR52733.2024.01812"},{"key":"10.1016\/j.imavis.2026.105929_b51","doi-asserted-by":"crossref","unstructured":"L. Hong, S. Yan, R. Zhang, W. 
Li, X. Zhou, P. Guo, K. Jiang, Y. Chen, J. Li, Z. Chen, et al., Onetracker: Unifying Visual Object Tracking with Foundation Models and Efficient Tuning, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024.","DOI":"10.1109\/CVPR52733.2024.01805"},{"key":"10.1016\/j.imavis.2026.105929_b52","doi-asserted-by":"crossref","unstructured":"B. Cao, J. Guo, P. Zhu, Q. Hu, Bi-Directional Adapter for Multimodal Tracking, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2024.","DOI":"10.1609\/aaai.v38i2.27852"},{"issue":"5","key":"10.1016\/j.imavis.2026.105929_b53","doi-asserted-by":"crossref","first-page":"2599","DOI":"10.1007\/s11263-024-02311-4","article-title":"Modality-missing rgb-t tracking: Invertible prompt learning and high-quality benchmarks","volume":"133","author":"Lu","year":"2025","journal-title":"Int. J. Comput. Vis."},{"issue":"7","key":"10.1016\/j.imavis.2026.105929_b54","doi-asserted-by":"crossref","first-page":"5847","DOI":"10.1109\/TPAMI.2025.3555485","article-title":"Cross-modality distillation for multi-modal tracking","volume":"47","author":"Zhang","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.imavis.2026.105929_b55","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"8682","article-title":"Cross-modulated attention transformer for rgb-t tracking","volume":"vol. 39","author":"Xiao","year":"2025"},{"key":"10.1016\/j.imavis.2026.105929_b56","doi-asserted-by":"crossref","first-page":"392","DOI":"10.1109\/TIP.2021.3130533","article-title":"LasHeR: A large-scale high-diversity benchmark for RGBT tracking","volume":"31","author":"Li","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.imavis.2026.105929_b57","doi-asserted-by":"crossref","unstructured":"P. Zhang, J. Zhao, D. Wang, H. Lu, X. 
Ruan, Visible-thermal UAV tracking: A large-scale benchmark and new baseline, in: IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 8886\u20138895.","DOI":"10.1109\/CVPR52688.2022.00868"},{"key":"10.1016\/j.imavis.2026.105929_b58","doi-asserted-by":"crossref","unstructured":"K. He, X. Chen, S. Xie, Y. Li, P. Doll\u00e1r, R. Girshick, Masked autoencoders are scalable vision learners, in: IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 16000\u201316009.","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"10.1016\/j.imavis.2026.105929_b59","doi-asserted-by":"crossref","unstructured":"Y. Zheng, B. Zhong, Q. Liang, Z. Mo, S. Zhang, X. Li, ODTrack: Online Dense Temporal Token Learning for Visual Tracking, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2024.","DOI":"10.1609\/aaai.v38i7.28591"}],"container-title":["Image and Vision Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0262885626000351?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0262885626000351?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T03:51:04Z","timestamp":1775101864000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0262885626000351"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":59,"alternative-id":["S0262885626000351"],"URL":"https:\/\/doi.org\/10.1016\/j.imavis.2026.105929","relation":{},"ISSN":["0262-8856"],"issn-type":[{"value":"0262-8856","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"STIFormer: RGB-T tracking via 
Spatial\u2013Temporal Interaction Transformer","name":"articletitle","label":"Article Title"},{"value":"Image and Vision Computing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.imavis.2026.105929","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"105929"}}