{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T08:31:17Z","timestamp":1772094677258,"version":"3.50.1"},"reference-count":70,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Knowledge-Based Systems"],
"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1016\/j.knosys.2026.115369","type":"journal-article","created":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T16:43:35Z","timestamp":1769013815000},"page":"115369","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["AdaptTrack: Perception field adaptation with contrastive attention for robust visual tracking"],"prefix":"10.1016","volume":"337","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3627-1465","authenticated-orcid":false,"given":"Yongjun","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-6401-218X","authenticated-orcid":false,"given":"Xiaohui","family":"Hao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.knosys.2026.115369_bib0001","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"146","article-title":"Aiatrack: attention in attention for transformer visual tracking","author":"Gao","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0002","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.111562","article-title":"ASAFormer: visual tracking with convolutional vision transformer and asymmetric selective attention","volume":"291","author":"Gong","year":"2024","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0003","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.112184","article-title":"Exploring the complementarity between convolution and transformer matching for visual tracking","volume":"300","author":"Wang","year":"2024","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0004","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.111368","article-title":"Efficient correlation information mixer for visual object tracking","volume":"285","author":"Chen","year":"2024","journal-title":"Knowl. Based Syst."},
{"key":"10.1016\/j.knosys.2026.115369_bib0005","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"10117","article-title":"Foreground-background distribution modeling transformer for visual object tracking","author":"Yang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0006","series-title":"Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision","first-page":"6708","article-title":"Separable self and mixed attention transformers for efficient object tracking","author":"Gopal","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0007","first-page":"7588","article-title":"Online dense temporal token learning for visual tracking","volume":"38","author":"Zheng","year":"2024","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115369_bib0008","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.111206","article-title":"Joint spatio-temporal modeling for visual tracking","volume":"283","author":"Sun","year":"2024","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0009","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"19300","article-title":"Autoregressive queries for adaptive tracking with spatio-temporal transformers","author":"Xie","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0010","first-page":"90579","article-title":"DeTrack: in-model latent denoising learning for visual object tracking","volume":"37","author":"Zhou","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0011","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.111025","article-title":"Multiple templates transformer for visual object tracking","volume":"280","author":"Pang","year":"2023","journal-title":"Knowl. Based Syst."},
{"key":"10.1016\/j.knosys.2026.115369_bib0012","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"9589","article-title":"Robust object modeling for visual tracking","author":"Cai","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0013","series-title":"2017 IEEE 60th International Midwest Symposium on Circuits and Systems (MWSCAS)","first-page":"1597","article-title":"Gate-variants of gated recurrent unit (gru) neural networks","author":"Dey","year":"2017"},{"key":"10.1016\/j.knosys.2026.115369_bib0014","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"8126","article-title":"Transformer tracking","author":"Chen","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0015","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"13608","article-title":"MixFormer: end-to-end tracking with iterative mixed attention","author":"Cui","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0016","first-page":"16743","article-title":"SwinTrack: a simple and strong baseline for transformer tracking","volume":"35","author":"Lin","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0017","doi-asserted-by":"crossref","first-page":"7554","DOI":"10.1109\/TCSVT.2025.3549953","article-title":"Avltrack: dynamic sparse learning for aerial vision-language tracking","volume":"35","author":"Xue","year":"2025","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"11","key":"10.1016\/j.knosys.2026.115369_bib0018","doi-asserted-by":"crossref","first-page":"10845","DOI":"10.1109\/TCSVT.2024.3411301","article-title":"Consistent representation mining for multi-drone single object tracking","volume":"34","author":"Xue","year":"2024","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},
{"key":"10.1016\/j.knosys.2026.115369_bib0019","first-page":"1","article-title":"SmallTrack: wavelet pooling and graph enhanced classification for uav small object tracking","volume":"61","author":"Xue","year":"2023","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"10.1016\/j.knosys.2026.115369_bib0020","first-page":"12519","article-title":"Sequential fusion based multi-granularity consistency for space-time transformer tracking","volume":"38","author":"Hu","year":"2024","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115369_bib0021","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"9612","article-title":"Exploring lightweight hierarchical vision transformers for efficient visual tracking","author":"Kang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0022","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"8761","article-title":"Global tracking via ensemble of local trackers","author":"Zhou","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0023","first-page":"58736","article-title":"MixFormerV2: efficient fully transformer tracking","volume":"36","author":"Cui","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},
{"key":"10.1016\/j.knosys.2026.115369_bib0024","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"300","article-title":"Tracking meets lora: faster training, larger model, stronger performance","author":"Lin","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0025","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"9697","article-title":"Autoregressive visual tracking","author":"Wei","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0026","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"19048","article-title":"Artrackv2: prompting autoregressive tracker where to look and how to describe","author":"Bai","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0027","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"14572","article-title":"SeqTrack: sequence to sequence learning for visual object tracking","author":"Chen","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0028","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.112742","article-title":"Online learning discriminative sparse convolution networks for robust uav object tracking","volume":"308","author":"Xu","year":"2025","journal-title":"Knowl. Based Syst."},
{"key":"10.1016\/j.knosys.2026.115369_bib0029","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"319","article-title":"DiFF-tracker: text-to-image diffusion models are unsupervised trackers","author":"Zhang","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0030","series-title":"International Conference on Learning Representations","article-title":"An image is worth 16x16 words: transformers for image recognition at scale","author":"Dosovitskiy","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0031","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"5374","article-title":"Lasot: a high-quality benchmark for large-scale single object tracking","author":"Fan","year":"2019"},{"key":"10.1016\/j.knosys.2026.115369_bib0032","doi-asserted-by":"crossref","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","article-title":"ImageNet large scale visual recognition challenge","volume":"115","author":"Russakovsky","year":"2015","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.knosys.2026.115369_bib0033","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"740","article-title":"Microsoft coco: common objects in context","author":"Lin","year":"2014"},{"key":"10.1016\/j.knosys.2026.115369_bib0034","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"300","article-title":"Trackingnet: a large-scale dataset and benchmark for object tracking in the wild","author":"Muller","year":"2018"},{"issue":"5","key":"10.1016\/j.knosys.2026.115369_bib0035","doi-asserted-by":"crossref","first-page":"1562","DOI":"10.1109\/TPAMI.2019.2957464","article-title":"Got-10k: a large high-diversity benchmark for generic object tracking in the wild","volume":"43","author":"Huang","year":"2019","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},
{"key":"10.1016\/j.knosys.2026.115369_bib0036","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"4282","article-title":"Siamrpn : evolution of siamese visual tracking with very deep networks","author":"Li","year":"2019"},{"key":"10.1016\/j.knosys.2026.115369_bib0037","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"6182","article-title":"Learning discriminative model prediction for tracking","author":"Bhat","year":"2019"},{"key":"10.1016\/j.knosys.2026.115369_bib0038","first-page":"12549","article-title":"SiamFC : towards robust and accurate visual tracking with target estimation guidelines","volume":"34","author":"Xu","year":"2020","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115369_bib0039","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"205","article-title":"Know your surroundings: exploiting scene information for object tracking","author":"Bhat","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0040","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"6269","article-title":"Siamcar: siamese fully convolutional classification and regression for visual tracking","author":"Guo","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0041","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"771","article-title":"Ocean: object-aware anchor-free tracking","author":"Zhang","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0042","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"6578","article-title":"Siam r-cnn: visual tracking by re-detection","author":"Voigtlaender","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0043","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision",
"first-page":"13339","article-title":"Learn to match: automatic matching network design for visual tracking","author":"Zhang","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0044","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"1571","article-title":"Transformer meets tracker: exploiting temporal context for robust visual tracking","author":"Wang","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0045","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"9856","article-title":"High-performance discriminative tracking with transformers","author":"Yu","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0046","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"10448","article-title":"Learning spatio-temporal transformer for visual tracking","author":"Yan","year":"2021"},{"key":"10.1016\/j.knosys.2026.115369_bib0047","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"8751","article-title":"Correlation-aware deep tracking","author":"Xie","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0048","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"8731","article-title":"Transforming model prediction for tracking","author":"Mayer","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0049","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"733","article-title":"Towards grand unification of object tracking","author":"Yan","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0050","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"375","article-title":"Backbone is all your need: a simplified architecture for visual object tracking",
"author":"Chen","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0051","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"341","article-title":"Joint feature learning and relation modeling for tracking: a one-stream framework","author":"Ye","year":"2022"},{"key":"10.1016\/j.knosys.2026.115369_bib0052","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"18686","article-title":"Generalized relation modeling for transformer tracking","author":"Gao","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0053","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"22826","article-title":"Videotrack: learning to track objects via video transformer","author":"Xie","year":"2023"},{"key":"10.1016\/j.knosys.2026.115369_bib0054","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"17","article-title":"Point set diffusion model for visual object tracking","author":"Xie","year":"2024"},{"key":"10.1016\/j.knosys.2026.115369_bib0055","first-page":"39303","article-title":"ChatTracker: enhancing visual tracking performance via chatting with multimodal large language model","volume":"37","author":"Sun","year":"2025","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0056","first-page":"4838","article-title":"Explicit visual prompts for visual object tracking","volume":"38","author":"Shi","year":"2024","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115369_bib0057","first-page":"8824","article-title":"Less is more: token context-aware learning for object tracking","volume":"39","author":"Xu","year":"2025","journal-title":"Proc. AAAI Conf. Artif. Intell."},
{"key":"10.1016\/j.knosys.2026.115369_bib0058","first-page":"8727","article-title":"Robust tracking via mamba-based context-aware token learning","volume":"39","author":"Xie","year":"2025","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115369_bib0059","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"19165","article-title":"Dynamic updates for language adaptation in visual-language tracking","author":"Li","year":"2025"},{"key":"10.1016\/j.knosys.2026.115369_bib0060","doi-asserted-by":"crossref","first-page":"439","DOI":"10.1007\/s11263-020-01387-y","article-title":"Lasot: a high-quality large-scale single object tracking benchmark","volume":"129","author":"Fan","year":"2021","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.knosys.2026.115369_bib0061","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"1328","article-title":"Fast online object tracking and segmentation: a unifying approach","author":"Wang","year":"2019"},{"key":"10.1016\/j.knosys.2026.115369_bib0062","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"4660","article-title":"Atom: accurate tracking by overlap maximization","author":"Danelljan","year":"2019"},{"key":"10.1016\/j.knosys.2026.115369_bib0063","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"7133","article-title":"D3s-A discriminative single shot segmentation tracker","author":"Lukezic","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0064","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"5289","article-title":"Alpha-refine: boosting tracking performance by precise bounding box estimation",
"author":"Yan","year":"2021"},{"issue":"5","key":"10.1016\/j.knosys.2026.115369_bib0065","doi-asserted-by":"crossref","first-page":"9474","DOI":"10.1109\/TNNLS.2024.3442290","article-title":"Graph attention network for context-aware visual tracking","volume":"35","author":"Shao","year":"2024","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"4","key":"10.1016\/j.knosys.2026.115369_bib0066","doi-asserted-by":"crossref","first-page":"6478","DOI":"10.1109\/TNNLS.2024.3402994","article-title":"ScalableTrack: scalable one-stream tracking via alternating learning","volume":"36","author":"Liu","year":"2024","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.knosys.2026.115369_bib0067","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"547","article-title":"The eighth visual object tracking vot2020 challenge results","author":"Kristan","year":"2020"},{"key":"10.1016\/j.knosys.2026.115369_bib0068","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"13763","article-title":"Towards more flexible and accurate object tracking with natural language: algorithms and benchmark","author":"Wang","year":"2021"},{"issue":"9","key":"10.1016\/j.knosys.2026.115369_bib0069","doi-asserted-by":"crossref","first-page":"1834","DOI":"10.1109\/TPAMI.2014.2388226","article-title":"Object tracking benchmark","volume":"37","author":"Wu","year":"2015","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},
{"key":"10.1016\/j.knosys.2026.115369_bib0070","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"17675","article-title":"Mc-bench: a benchmark for multi-context visual grounding in the era of mllms","author":"Xu","year":"2025"}],"container-title":["Knowledge-Based Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126001127?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126001127?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T07:42:59Z","timestamp":1772091779000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0950705126001127"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":70,"alternative-id":["S0950705126001127"],"URL":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115369","relation":{},"ISSN":["0950-7051"],"issn-type":[{"value":"0950-7051","type":"print"}],"subject":[],"published":{"date-parts":[[2026,3]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"AdaptTrack: Perception field adaptation with contrastive attention for robust visual tracking","name":"articletitle","label":"Article Title"},{"value":"Knowledge-Based Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115369","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"115369"}}