{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T17:16:20Z","timestamp":1774026980227,"version":"3.50.1"},"reference-count":76,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/100007219","name":"Natural Science Foundation of Shanghai Municipality","doi-asserted-by":"publisher","award":["23ZR1422800"],"award-info":[{"award-number":["23ZR1422800"]}],"id":[{"id":"10.13039\/100007219","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Knowledge-Based Systems"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.knosys.2026.115603","type":"journal-article","created":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T07:47:41Z","timestamp":1772005661000},"page":"115603","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Negative-Sampling prompt learning for hard negative sample discrimination"],"prefix":"10.1016","volume":"339","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8344-1201","authenticated-orcid":false,"given":"Haoyang","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-2065-6004","authenticated-orcid":false,"given":"Liang","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1312-9527","authenticated-orcid":false,"given":"Yan","family":"Peng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4843-1953","authenticated-orcid":false,"given":"Chao","family":"Wang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.knosys.2026.115603_bib0001","article-title":"Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume":"32","author":"Lu","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115603_bib0002","series-title":"International Conference on Machine Learning","first-page":"5583","article-title":"Vilt: vision-and-language transformer without convolution or region supervision","author":"Kim","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0003","article-title":"Coca: contrastive captioners are image-Text foundation models","author":"Yu","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"10.1016\/j.knosys.2026.115603_bib0004","first-page":"9694","article-title":"Align before fuse: vision and language representation learning with momentum distillation","volume":"34","author":"Li","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115603_bib0005","series-title":"International Conference on Machine Learning","first-page":"19730","article-title":"Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0006","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.127505","article-title":"ZVQAF: Zero-shot visual question answering with feedback from large language models","volume":"580","author":"Liu","year":"2024","journal-title":"Neurocomputing"},{"key":"10.1016\/j.knosys.2026.115603_bib0007","series-title":"European Conference on Computer Vision","first-page":"740","article-title":"Microsoft coco: common objects in context","author":"Lin","year":"2014"},{"key":"10.1016\/j.knosys.2026.115603_bib0008","doi-asserted-by":"crossref","first-page":"67","DOI":"10.1162\/tacl_a_00166","article-title":"From image descriptions to visual denotations: new similarity metrics for semantic inference over event descriptions","volume":"2","author":"Young","year":"2014","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"10.1016\/j.knosys.2026.115603_bib0009","series-title":"Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing","first-page":"3876","article-title":"MedCLIP: contrastive learning from unpaired medical images and text","author":"Wang","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0010","series-title":"European Conference on Computer Vision","first-page":"634","article-title":"Fashionvil: fashion-Focused vision-and-Language representation learning","author":"Han","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0011","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"12047","article-title":"Fine-grained recognition: accounting for subtle differences between similar classes","volume":"34","author":"Sun","year":"2020"},{"key":"10.1016\/j.knosys.2026.115603_bib0012","article-title":"Contrastive learning with hard negative samples","author":"Robinson","year":"2021","journal-title":"Int. Conf. Learn. Representat."},{"key":"10.1016\/j.knosys.2026.115603_bib0013","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"2606","article-title":"A-Fast-rcnn: hard positive generation via adversary for object detection","author":"Wang","year":"2017"},{"key":"10.1016\/j.knosys.2026.115603_bib0014","series-title":"Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision","first-page":"5563","article-title":"Enhancing multimodal compositional reasoning of visual language models with generative negative mining","author":"Sahin","year":"2024"},{"key":"10.1016\/j.knosys.2026.115603_bib0015","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2025.110609","article-title":"Dialogue response coherency evaluation with feature sensitive negative sample using multi list-wise ranking loss","volume":"150","author":"Hwang","year":"2025","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115603_bib0016","article-title":"A review: deep learning for medical image segmentation using multi-modality fusion","volume":"3","author":"Zhou","year":"2019","journal-title":"Array"},{"key":"10.1016\/j.knosys.2026.115603_bib0017","series-title":"ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"1","article-title":"On negative sampling for contrastive audio-text retrieval","author":"Xie","year":"2023"},{"issue":"1","key":"10.1016\/j.knosys.2026.115603_bib0018","doi-asserted-by":"crossref","first-page":"3094","DOI":"10.1038\/s41467-022-30761-2","article-title":"Towards artificial general intelligence via a multimodal foundation model","volume":"13","author":"Fei","year":"2022","journal-title":"Nat. Commun."},{"key":"10.1016\/j.knosys.2026.115603_bib0019","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"21406","article-title":"Vl-interpret: an interactive visualization tool for interpreting vision-language transformers","author":"Aflalo","year":"2022"},{"issue":"9","key":"10.1016\/j.knosys.2026.115603_bib0020","doi-asserted-by":"crossref","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","article-title":"Learning to prompt for vision-language models","volume":"130","author":"Zhou","year":"2022","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.knosys.2026.115603_bib0021","series-title":"International Conference on Machine Learning","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0022","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"1218","article-title":"Similarity reasoning and filtration for image-text matching","volume":"35","author":"Diao","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0023","doi-asserted-by":"crossref","unstructured":"M. Cao, S. Li, J. Li, L. Nie, M. Zhang, Image-text retrieval: A survey on recent research and development, (2022). arXiv: 2203.14713.","DOI":"10.24963\/ijcai.2022\/759"},{"key":"10.1016\/j.knosys.2026.115603_bib0024","series-title":"European Conference on Computer Vision","first-page":"709","article-title":"Visual prompt tuning","author":"Jia","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0025","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"49","article-title":"Learning deep representations of fine-grained visual descriptions","author":"Reed","year":"2016"},{"key":"10.1016\/j.knosys.2026.115603_bib0026","unstructured":"N. Rostamzadeh, S. Hosseini, T. Boquet, W. Stokowiec, Y. Zhang, C. Jauvin, C. Pal, Fashion-gen: The generative fashion dataset and challenge, (2018). arXiv: 1806.08317."},{"key":"10.1016\/j.knosys.2026.115603_bib0027","unstructured":"F. Faghri, D.J. Fleet, J.R. Kiros, S. Fidler, Vse++: Improving visual-semantic embeddings with hard negatives, (2017). arXiv: 1707.05612."},{"key":"10.1016\/j.knosys.2026.115603_bib0028","series-title":"Proceedings of the 59Th Annual Meeting of the Association for Computational Linguistics and the 11Th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","first-page":"2592","article-title":"UNIMO: Towards unified-Modal understanding and generation via cross-Modal contrastive learning","author":"Li","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0029","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2021.104489","article-title":"HMMN: Online metric learning for human re-identification via hard sample mining memory network","volume":"106","author":"Han","year":"2021","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115603_bib0030","first-page":"32897","article-title":"Vlmo: unified vision-language pre-training with mixture-of-modality-experts","volume":"35","author":"Bao","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115603_bib0031","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)","first-page":"2843","article-title":"Hard-negative sampling with cascaded fine-Tuning network to boost flare removal performance in the nighttime images","author":"Song","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0032","doi-asserted-by":"crossref","unstructured":"P.J. R\u00f6sch, N. Oswald, M. Geierhos, J. Libovick\u1ef3, Enhancing Conceptual Understanding in Multimodal Contrastive Learning through Hard Negative Samples, (2024). arXiv: 2403.02875.","DOI":"10.18653\/v1\/2024.alvr-1.9"},{"key":"10.1016\/j.knosys.2026.115603_bib0033","article-title":"Deep multi-similarity hashing via label-guided network for cross-modal retrieval","author":"Wu","year":"2024","journal-title":"Neurocomputing"},{"key":"10.1016\/j.knosys.2026.115603_bib0034","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"7882","article-title":"Cross modal focal loss for rgbd face anti-spoofing","author":"George","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0035","unstructured":"H. Wang, M. Huang, R. Huang, L. Hong, H. Xu, T. Hu, X. Liang, Z. Li, Boosting visual-language models by exploiting hard samples, (2023). arXiv: 2305.05208."},{"key":"10.1016\/j.knosys.2026.115603_bib0036","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128047","article-title":"Event representation via contrastive learning with prototype based hard negative sampling","volume":"600","author":"Kong","year":"2024","journal-title":"Neurocomputing"},{"key":"10.1016\/j.knosys.2026.115603_bib0037","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"6967","article-title":"Filtering, distillation, and hard negatives for vision-language pre-training","author":"Radenovic","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0038","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"2417","article-title":"Structure-CLIP: towards scene graph knowledge to enhance multi-Modal structured representations","volume":"38","author":"Huang","year":"2024"},{"key":"10.1016\/j.knosys.2026.115603_bib0039","series-title":"Proceedings of the 60Th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"3013","article-title":"Cross-Modal discrete representation learning","author":"Liu","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0040","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"10910","article-title":"CREPE: Can vision-Language foundation models reason compositionally?","author":"Ma","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0041","unstructured":"F. Petroni, T. Rockt\u00e4schel, P. Lewis, A. Bakhtin, Y. Wu, A.H. Miller, S. Riedel, Language models as knowledge bases?, (2019). arXiv: 1909.01066."},{"key":"10.1016\/j.knosys.2026.115603_bib0042","unstructured":"X.L. Li, P. Liang, Prefix-tuning: optimizing continuous prompts for generation, (2021). arXiv: 2101.00190."},{"key":"10.1016\/j.knosys.2026.115603_bib0043","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115603_bib0044","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"15211","article-title":"Prompt, generate, then cache: cascade of foundation models makes strong few-Shot learners","author":"Zhang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0045","unstructured":"S. Menon, C. Vondrick, Visual classification via description from large language models, (2022). arXiv: 2210.07183."},{"key":"10.1016\/j.knosys.2026.115603_bib0046","series-title":"2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"16795","article-title":"Conditional prompt learning for vision-Language models","author":"Zhou","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0047","article-title":"Learning to decompose visual features with latent textual prompts","author":"Wang","year":"2023","journal-title":"ICLR"},{"key":"10.1016\/j.knosys.2026.115603_bib0048","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128290","article-title":"Multi-task prompt tuning with soft context sharing for vision\u2013language models","volume":"603","author":"Ding","year":"2024","journal-title":"Neurocomputing"},{"key":"10.1016\/j.knosys.2026.115603_bib0049","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"10878","article-title":"Diversity-Aware meta visual prompting","author":"Huang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0050","article-title":"Fine-grained visual prompting","volume":"36","author":"Yang","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2026.115603_bib0051","unstructured":"Y. Zang, W. Li, K. Zhou, C. Huang, C.C. Loy, Unified vision and language prompt learning, (2022). arXiv: 2210.07225."},{"key":"10.1016\/j.knosys.2026.115603_bib0052","doi-asserted-by":"crossref","unstructured":"Z. Li, X. Li, X. Fu, X. Zhang, W. Wang, J. Yang, PromptKD: Unsupervised Prompt Distillation for Vision-Language Models, (2024). arXiv: 2403.02781.","DOI":"10.1109\/CVPR52733.2024.02513"},{"key":"10.1016\/j.knosys.2026.115603_bib0053","unstructured":"F. Cui, Y. Zhang, X. Wang, X. Wang, L. Xiao, Generalizable Prompt Learning of CLIP: A Brief Overview, (2025). arXiv: 2503.01263."},{"key":"10.1016\/j.knosys.2026.115603_bib0054","unstructured":"J. Devlin, M.-W. Chang, K. Lee, K. Toutanova, Bert: Pre-training of deep bidirectional transformers for language understanding, (2018). arXiv: 1810.04805."},{"key":"10.1016\/j.knosys.2026.115603_bib0055","series-title":"Proceedings of the 56Th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"2556","article-title":"Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning","author":"Sharma","year":"2018"},{"key":"10.1016\/j.knosys.2026.115603_bib0056","series-title":"2004 Conference on Computer Vision and Pattern Recognition Workshop","first-page":"178","article-title":"Learning generative visual models from few training examples: an incremental bayesian approach tested on 101 object categories","author":"Fei-Fei","year":"2004"},{"key":"10.1016\/j.knosys.2026.115603_bib0057","unstructured":"T. Mikolov, K. Chen, G. Corrado, J. Dean, Efficient estimation of word representations in vector space, (2013).arXiv: 1301.3781."},{"issue":"4","key":"10.1016\/j.knosys.2026.115603_bib0058","doi-asserted-by":"crossref","first-page":"433","DOI":"10.1002\/wics.101","article-title":"Principal component analysis","volume":"2","author":"Abdi","year":"2010","journal-title":"Wiley Interdiscip. Rev. Comput. Stat."},{"issue":"1","key":"10.1016\/j.knosys.2026.115603_bib0059","first-page":"411","article-title":"Spacy 2: natural language understanding with bloom embeddings, convolutional neural networks and incremental parsing","volume":"7","author":"Honnibal","year":"2017","journal-title":"To appear"},{"issue":"4-5","key":"10.1016\/j.knosys.2026.115603_bib0060","doi-asserted-by":"crossref","first-page":"385","DOI":"10.1016\/j.artint.2005.12.002","article-title":"Breadth-first heuristic search","volume":"170","author":"Zhou","year":"2006","journal-title":"Artif. Intell."},{"key":"10.1016\/j.knosys.2026.115603_bib0061","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"15659","article-title":"Prompt-aligned gradient for prompt tuning","author":"Zhu","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0062","article-title":"Decouple before align: visual disentanglement enhances prompt tuning","author":"Zhang","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"13","key":"10.1016\/j.knosys.2026.115603_bib0063","doi-asserted-by":"crossref","first-page":"3521","DOI":"10.1073\/pnas.1611835114","article-title":"Overcoming catastrophic forgetting in neural networks","volume":"114","author":"Kirkpatrick","year":"2017","journal-title":"Proc. Natl. Acad. Sci."},{"key":"10.1016\/j.knosys.2026.115603_bib0064","unstructured":"Y. Luo, Z. Yang, F. Meng, Y. Li, J. Zhou, Y. Zhang, An empirical study of catastrophic forgetting in large language models during continual fine-tuning, (2023). arXiv: 2308.08747."},{"key":"10.1016\/j.knosys.2026.115603_bib0065","series-title":"Proceedings of the 31St ACM International Conference on Multimedia","first-page":"6510","article-title":"Knowledge decomposition and replay: a novel cross-modal image-Text retrieval continual learning method","author":"Yang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0066","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"2669","article-title":"Fame-vil: multi-tasking vision-language model for heterogeneous fashion tasks","author":"Han","year":"2023"},{"key":"10.1016\/j.knosys.2026.115603_bib0067","unstructured":"A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, et al., An image is worth 16x16 words: Transformers for image recognition at scale, (2020). arXiv: 2010.11929."},{"key":"10.1016\/j.knosys.2026.115603_bib0068","doi-asserted-by":"crossref","first-page":"2056","DOI":"10.1109\/TMM.2023.3291588","article-title":"Dual modality prompt tuning for vision-language pre-trained model","volume":"26","author":"Xing","year":"2023","journal-title":"IEEE Trans. Multimedia"},{"key":"10.1016\/j.knosys.2026.115603_bib0069","series-title":"2025 IEEE International Conference on Multimedia and Expo (ICME)","first-page":"1","article-title":"MAO: Efficient model-Agnostic optimization of prompt tuning for vision-Language models","author":"Li","year":"2025"},{"key":"10.1016\/j.knosys.2026.115603_bib0070","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"25623","article-title":"Dpc: dual-prompt collaboration for tuning vision-language models","author":"Li","year":"2025"},{"key":"10.1016\/j.knosys.2026.115603_bib0071","series-title":"Proceedings of the 43Rd International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"2251","article-title":"Fashionbert: text and image matching with adaptive loss for cross-modal retrieval","author":"Gao","year":"2020"},{"key":"10.1016\/j.knosys.2026.115603_bib0072","series-title":"Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXX 16","first-page":"121","article-title":"Oscar: object-semantics aligned pre-training for vision-language tasks","author":"Li","year":"2020"},{"key":"10.1016\/j.knosys.2026.115603_bib0073","series-title":"2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"12642","article-title":"Kaleido-BERT: vision-Language pre-training on fashion domain","author":"Zhuge","year":"2021"},{"key":"10.1016\/j.knosys.2026.115603_bib0074","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.03.057","article-title":"Efficient text-image semantic search: a multi-modal vision-language approach for fashion retrieval","volume":"538","author":"Moro","year":"2023","journal-title":"Neurocomputing"},{"key":"10.1016\/j.knosys.2026.115603_bib0075","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"16000","article-title":"Masked autoencoders are scalable vision learners","author":"He","year":"2022"},{"key":"10.1016\/j.knosys.2026.115603_bib0076","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"23390","article-title":"Scaling language-Image pre-Training via masking","author":"Li","year":"2023"}],"container-title":["Knowledge-Based Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126003436?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126003436?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T16:27:00Z","timestamp":1774024020000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0950705126003436"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":76,"alternative-id":["S0950705126003436"],"URL":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115603","relation":{},"ISSN":["0950-7051"],"issn-type":[{"value":"0950-7051","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Negative-Sampling prompt learning for hard negative sample discrimination","name":"articletitle","label":"Article Title"},{"value":"Knowledge-Based Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115603","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"115603"}}