{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T16:10:49Z","timestamp":1774023049732,"version":"3.50.1"},"reference-count":59,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61841602"],"award-info":[{"award-number":["61841602"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central 
Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007129","name":"Natural Science Foundation of Shandong Province","doi-asserted-by":"publisher","award":["ZR2021MF017"],"award-info":[{"award-number":["ZR2021MF017"]}],"id":[{"id":"10.13039\/501100007129","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007129","name":"Natural Science Foundation of Shandong Province","doi-asserted-by":"publisher","award":["ZR2020MF147"],"award-info":[{"award-number":["ZR2020MF147"]}],"id":[{"id":"10.13039\/501100007129","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004032","name":"Jilin University","doi-asserted-by":"publisher","award":["93K172021K12"],"award-info":[{"award-number":["93K172021K12"]}],"id":[{"id":"10.13039\/501100004032","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Journal of Visual Communication and Image Representation"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.jvcir.2026.104747","type":"journal-article","created":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T07:18:52Z","timestamp":1771658332000},"page":"104747","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Semantic similarity guided contrastive hashing for unsupervised cross-modal 
retrieval"],"prefix":"10.1016","volume":"117","author":[{"given":"Limeng","family":"Gao","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2716-6388","authenticated-orcid":false,"given":"Zhen","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xinzhong","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Zhen","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Mingzhe","family":"Yang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.jvcir.2026.104747_b1","doi-asserted-by":"crossref","first-page":"4613","DOI":"10.1109\/TMM.2025.3535306","article-title":"Deep semantic-consistent penalizing hashing for cross-modal retrieval","volume":"27","author":"Qin","year":"2025","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.jvcir.2026.104747_b2","doi-asserted-by":"crossref","first-page":"824","DOI":"10.1109\/TMM.2023.3272169","article-title":"Hierarchical consensus hashing for cross-modal retrieval","volume":"26","author":"Sun","year":"2024","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.jvcir.2026.104747_b3","doi-asserted-by":"crossref","unstructured":"Yuan Sun, Jian Dai, Zhenwen Ren, Yingke Chen, Dezhong Peng, Peng Hu, Dual self-paced cross-modal hashing, in: AAAI Conference on Artificial Intelligence, 2024, pp. 15184\u201315192.","DOI":"10.1609\/aaai.v38i14.29441"},{"key":"10.1016\/j.jvcir.2026.104747_b4","unstructured":"Kaiming Liu, Yunhong Gong, Yu Cao, Zhenwen Ren, Dezhong Peng, Yuan Sun, Dual semantic fusion hashing for multi-label cross-modal retrieval, in: International Joint Conference on Artificial Intelligence, 2024."},{"issue":"1","key":"10.1016\/j.jvcir.2026.104747_b5","doi-asserted-by":"crossref","first-page":"576","DOI":"10.1109\/TCSVT.2023.3285266","article-title":"Deep semantic-aware proxy hashing for multi-label cross-modal retrieval","volume":"34","author":"Huo","year":"2024","journal-title":"IEEE Trans. 
Circuits Syst. Video Technol."},{"key":"10.1016\/j.jvcir.2026.104747_b6","doi-asserted-by":"crossref","first-page":"6361","DOI":"10.1109\/TMM.2023.3349075","article-title":"Deep neighborhood-preserving hashing with quadratic spherical mutual information for cross-modal retrieval","volume":"26","author":"Qin","year":"2024","journal-title":"IEEE Trans. Multimed."},{"issue":"11","key":"10.1016\/j.jvcir.2026.104747_b7","doi-asserted-by":"crossref","first-page":"5926","DOI":"10.1109\/TKDE.2024.3401050","article-title":"Deep hierarchy-aware proxy hashing with self-paced learning for cross-modal retrieval","volume":"36","author":"Huo","year":"2024","journal-title":"IEEE Trans. Knowl. Data Eng."},{"issue":"6","key":"10.1016\/j.jvcir.2026.104747_b8","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1007\/s00530-024-01539-x","article-title":"Dark knowledge association guided hashing for unsupervised cross-modal retrieval","volume":"30","author":"Kang","year":"2024","journal-title":"Multimedia Syst."},{"issue":"9","key":"10.1016\/j.jvcir.2026.104747_b9","doi-asserted-by":"crossref","first-page":"8838","DOI":"10.1109\/TKDE.2022.3218656","article-title":"Work together: Correlation-identity reconstruction hashing for unsupervised cross-modal retrieval","volume":"35","author":"Zhu","year":"2023","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.jvcir.2026.104747_b10","volume":"182","author":"Chen","year":"2025","journal-title":"Neural Netw."},{"key":"10.1016\/j.jvcir.2026.104747_b11","doi-asserted-by":"crossref","unstructured":"Xiaoshuai Hao, Wanqian Zhang, Dayan Wu, Fei Zhu, Bo Li, Dual alignment unsupervised domain adaptation for video-text retrieval, Conference on Computer Vision and Pattern Recognition Workshops, in: IEEE Computer Society Conference on Computer Vision and Pattern Recognition. Workshops, 2023, pp. 
18962\u201318972.","DOI":"10.1109\/CVPR52729.2023.01818"},{"key":"10.1016\/j.jvcir.2026.104747_b12","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2025.103665","article-title":"SearchExpert: A genai-driven framework for reasoning-intensive multimedia information fusion through fine-tuning and reinforcement learning","volume":"126","author":"Li","year":"2026","journal-title":"Inf. Fusion"},{"issue":"2","key":"10.1016\/j.jvcir.2026.104747_b13","first-page":"1185","article-title":"Adaptive label correlation based asymmetric discrete hashing for cross-modal retrieval","volume":"35","author":"Li","year":"2023","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.jvcir.2026.104747_b14","article-title":"Weighted semantic feature based self-supervised deep cross-modal hashing","volume":"14","author":"Gao","year":"2025","journal-title":"Int. J. Multimed. Inf. Retr."},{"key":"10.1016\/j.jvcir.2026.104747_b15","doi-asserted-by":"crossref","first-page":"16","DOI":"10.1007\/s13735-023-00288-3","article-title":"Deep adversarial multi-label cross-modal hashing algorithm","volume":"12","author":"Yang","year":"2023","journal-title":"Int. J. Multimed. Inf. Retr."},{"key":"10.1016\/j.jvcir.2026.104747_b16","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2024.106211","article-title":"Structure-aware contrastive hashing for unsupervised cross-modal retrieval","volume":"174","author":"Cui","year":"2024","journal-title":"Neural Netw."},{"issue":"2","key":"10.1016\/j.jvcir.2026.104747_b17","doi-asserted-by":"crossref","first-page":"553","DOI":"10.1109\/TBDATA.2024.3423704","article-title":"Deep cross-modal hashing with ranking learning for noisy labels","volume":"11","author":"Shu","year":"2025","journal-title":"IEEE Trans. 
Big Data"},{"issue":"4","key":"10.1016\/j.jvcir.2026.104747_b18","doi-asserted-by":"crossref","first-page":"371","DOI":"10.1109\/TBDATA.2023.3338951","article-title":"Proxy-based graph convolutional hashing for cross-modal retrieval","volume":"10","author":"Bai","year":"2024","journal-title":"IEEE Trans. Big Data"},{"key":"10.1016\/j.jvcir.2026.104747_b19","article-title":"Two-stage zero-shot sparse hashing with missing labels for cross-modal retrieval","volume":"155","author":"Yong","year":"2024","journal-title":"BPRA Int. Conf. Pattern Recognit."},{"key":"10.1016\/j.jvcir.2026.104747_b20","article-title":"Robust online hashing with label semantic enhancement for cross-modal retrieval","volume":"145","year":"2024","journal-title":"Pattern Recognit."},{"issue":"1","key":"10.1016\/j.jvcir.2026.104747_b21","doi-asserted-by":"crossref","first-page":"387","DOI":"10.1109\/TPAMI.2024.3467130","article-title":"Unsupervised dual deep hashing with semantic-index and content-code for cross-modal retrieval","volume":"47","author":"Zhang","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.jvcir.2026.104747_b22","doi-asserted-by":"crossref","first-page":"1768","DOI":"10.1109\/TIP.2024.3371358","article-title":"Exploring hierarchical information in hyperbolic space for self-supervised image hashing","volume":"33","author":"Wei","year":"2024","journal-title":"IEEE Trans. Image Process."},{"issue":"4","key":"10.1016\/j.jvcir.2026.104747_b23","doi-asserted-by":"crossref","first-page":"4756","DOI":"10.1109\/TNNLS.2022.3174970","article-title":"Graph convolutional network discrete hashing for cross-modal retrieval","volume":"35","author":"Bai","year":"2024","journal-title":"IEEE Trans. Neural Networks Learn. 
Syst."},{"issue":"8","key":"10.1016\/j.jvcir.2026.104747_b24","doi-asserted-by":"crossref","first-page":"6937","DOI":"10.1007\/s10115-025-02419-0","article-title":"Label-consistent kernel transform learning based sparse hashing for cross-modal retrieval","volume":"67","author":"Maggu","year":"2025","journal-title":"Knowl. Inf. Syst."},{"key":"10.1016\/j.jvcir.2026.104747_b25","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2023.109934","article-title":"Efficient supervised graph embedding hashing for large-scale cross-media retrieval","volume":"145","author":"Yao","year":"2024","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.jvcir.2026.104747_b26","article-title":"Similarity graph-correlation reconstruction network for unsupervised cross-modal hashing","volume":"237","author":"Li","year":"2024","journal-title":"Expert Syst. Appl."},{"key":"10.1016\/j.jvcir.2026.104747_b27","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.109503","article-title":"Discrete fusion adversarial hashing for cross-modal retrieval","volume":"253","author":"Li","year":"2022","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.jvcir.2026.104747_b28","doi-asserted-by":"crossref","first-page":"3009","DOI":"10.1109\/TIP.2024.3385656","article-title":"Multi-relational deep hashing for cross-modal search","volume":"33","author":"Liang","year":"2024","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.jvcir.2026.104747_b29","first-page":"539","article-title":"Unsupervised generative adversarial cross-modal hashing","author":"Zhang","year":"2018","journal-title":"Comput. Res. Repos."},{"issue":"10","key":"10.1016\/j.jvcir.2026.104747_b30","doi-asserted-by":"crossref","first-page":"7255","DOI":"10.1109\/TCSVT.2022.3172716","article-title":"Deep adaptively enhanced hashing with discriminative similarity guidance for unsupervised cross-modal retrieval","volume":"32","author":"Shi","year":"2022","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"key":"10.1016\/j.jvcir.2026.104747_b31","doi-asserted-by":"crossref","unstructured":"You Wu, Zhixin Li, Mining similarity relationships for unsupervised cross-modal hashing, in: Proceedings. IEEE International Conference on Multimedia and Expo, 2024.","DOI":"10.1109\/ICME57554.2024.10687927"},{"key":"10.1016\/j.jvcir.2026.104747_b32","doi-asserted-by":"crossref","first-page":"152","DOI":"10.1016\/j.neucom.2021.06.087","article-title":"Clustering-driven deep adversarial hashing for scalable unsupervised cross-modal retrieval","volume":"459","author":"Shen","year":"2021","journal-title":"Neurocomputing"},{"key":"10.1016\/j.jvcir.2026.104747_b33","unstructured":"Shupeng Su, Zhisheng Zhong, Chao Zhang, Deep joint-semantics reconstructing hashing for large-scale unsupervised cross-modal retrieval, in: IEEE International Conference on Computer Vision, 2019."},{"key":"10.1016\/j.jvcir.2026.104747_b34","doi-asserted-by":"crossref","unstructured":"Song Liu, Shengsheng Qian, Jiawei Zhan, Long Ying, Joint-modal distribution-based similarity hashing for large-scale unsupervised deep cross-modal retrieval, in: Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, 2020, pp. 1379\u20131388.","DOI":"10.1145\/3397271.3401086"},{"key":"10.1016\/j.jvcir.2026.104747_b35","first-page":"1","article-title":"Unsupervised deep hashing with multiple similarity preservation for cross-modal image-text retrieval","author":"Xiong","year":"2024","journal-title":"Int. J. Mach. Learn. Cybern."},{"key":"10.1016\/j.jvcir.2026.104747_b36","doi-asserted-by":"crossref","first-page":"2","DOI":"10.1016\/j.ipm.2024.103958","article-title":"Unsupervised adaptive hypergraph correlation hashing for multimedia retrieval","volume":"62","author":"Chen","year":"2025","journal-title":"Inf. Process. 
Manag."},{"issue":"6","key":"10.1016\/j.jvcir.2026.104747_b37","first-page":"6461","article-title":"Watch: two-stage discrete cross-media hashing","volume":"35","author":"Zhang","year":"2023","journal-title":"IEEE Trans. Knowl. Data Eng."},{"issue":"4","key":"10.1016\/j.jvcir.2026.104747_b38","doi-asserted-by":"crossref","first-page":"1838","DOI":"10.1109\/TNNLS.2020.2997020","article-title":"Deep semantic multimodal hashing network for scalable image-text and video-text retrievals","volume":"34","author":"Jin","year":"2023","journal-title":"IEEE Trans. Neural Networks Learn. Syst."},{"key":"10.1016\/j.jvcir.2026.104747_b39","doi-asserted-by":"crossref","first-page":"6","DOI":"10.1145\/3643639","article-title":"Deep neighborhood-aware proxy hashing with uniform distribution constraint for cross-modal retrieval","volume":"20","author":"Huo","year":"2024","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"10.1016\/j.jvcir.2026.104747_b40","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.111837","article-title":"Supervised contrastive discrete hashing for cross-modal retrieval","volume":"295","author":"Li","year":"2024","journal-title":"Knowl.-Based Syst."},{"issue":"4","key":"10.1016\/j.jvcir.2026.104747_b41","first-page":"973","article-title":"Deep binary reconstruction for cross-modal hashing","volume":"21","author":"Li","year":"2018","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.jvcir.2026.104747_b42","doi-asserted-by":"crossref","unstructured":"Gengshen Wu, Zijia Lin, Jungong Han, Li Liu, Guiguang Ding, Baochang Zhang, Jialie Shen, Unsupervised deep hashing via binary latent factor models for large-scale cross-modal retrieval, in: International Joint Conference on Artificial Intelligence, 2018, pp. 
2854\u20132860.","DOI":"10.24963\/ijcai.2018\/396"},{"key":"10.1016\/j.jvcir.2026.104747_b43","doi-asserted-by":"crossref","unstructured":"Dejie Yang, Dayan Wu, Wanqian Zhang, Haisu Zhang, Bo Li, Weiping Wang, Deep semantic-alignment hashing for unsupervised cross-modal retrieval, in: International Conference on Multimedia Retrieval, 2020, pp. 44\u201352.","DOI":"10.1145\/3372278.3390673"},{"key":"10.1016\/j.jvcir.2026.104747_b44","doi-asserted-by":"crossref","first-page":"466","DOI":"10.1109\/TMM.2021.3053766","article-title":"Aggregation-based graph convolutional hashing for unsupervised cross-modal retrieval","volume":"24","author":"Zhang","year":"2021","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.jvcir.2026.104747_b45","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2023.101968","article-title":"When CLIP meets cross-modal hashing retrieval: a new strong baseline","volume":"100","author":"Xia","year":"2023","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.jvcir.2026.104747_b46","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.127911","article-title":"Joint-modal graph convolutional hashing for unsupervised cross-modal retrieval","volume":"595","author":"Meng","year":"2024","journal-title":"Neurocomputing"},{"issue":"3","key":"10.1016\/j.jvcir.2026.104747_b47","first-page":"3877","article-title":"Unsupervised contrastive cross-modal hashing","volume":"45","author":"Hu","year":"2023","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.jvcir.2026.104747_b48","doi-asserted-by":"crossref","unstructured":"Kangkang Lu, Yanhua Yu, Meiyu Liang, Min Zhang, Xiaowen Cao, Zehua Zhao, Mengran Yin, Zhe Xue, Deep unsupervised momentum contrastive hashing for cross-modal retrieval, in: Proceedings. IEEE International Conference on Multimedia and Expo, 2023, pp. 
126\u2013131.","DOI":"10.1109\/ICME55011.2023.00030"},{"key":"10.1016\/j.jvcir.2026.104747_b49","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2024.108969","article-title":"Unsupervised cross-modal hashing retrieval via dynamic contrast and optimization","volume":"136","author":"Xie","year":"2024","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.jvcir.2026.104747_b50","doi-asserted-by":"crossref","DOI":"10.1016\/j.ins.2023.119543","article-title":"Multi-similarity reconstructing and clustering-based contrastive hashing for cross-modal retrieval","volume":"647","author":"Xie","year":"2023","journal-title":"Inform. Sci."},{"key":"10.1016\/j.jvcir.2026.104747_b51","doi-asserted-by":"crossref","unstructured":"Mark J. Huiskes, Michael S. Lew, The mir flickr retrieval evaluation, in: ACM International Conference on Multimedia Information Retrieval, 2008, pp. 39\u201343.","DOI":"10.1145\/1460096.1460104"},{"key":"10.1016\/j.jvcir.2026.104747_b52","doi-asserted-by":"crossref","unstructured":"Tat-Seng Chua, Jinhui Tang, Richang Hong, Haojie Li, Zhiping Luo, Yantao Zheng, Nus-wide: A real-world web image database from national university of singapore, in: ACM International Conference on Image and Video Retrieval, 2009, pp. 1\u20139.","DOI":"10.1145\/1646396.1646452"},{"key":"10.1016\/j.jvcir.2026.104747_b53","series-title":"European Conference on Computer Vision","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"Lin","year":"2014"},{"key":"10.1016\/j.jvcir.2026.104747_b54","unstructured":"Shaishav Kumar, Raghavendra Udupa, Learning hash functions for cross-view similarity search, in: International Joint Conference on Artificial Intelligence, 2011, pp. 
1360\u20131365."},{"key":"10.1016\/j.jvcir.2026.104747_b55","doi-asserted-by":"crossref","unstructured":"Jingkuan Song, Yi Yang Yang, Zi Huang, Heng Tao Shen, Inter-media hashing for large scale retrieval from heterogeneous data sources, in: ACM SIGMOD Conference, 2013, pp. 785\u2013796.","DOI":"10.1145\/2463676.2465274"},{"key":"10.1016\/j.jvcir.2026.104747_b56","doi-asserted-by":"crossref","unstructured":"Jile Zhou, Guiguang Ding, Yuchen Guo, Latent semantic sparse hashing for cross-modal similarity search, in: Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, 2014.","DOI":"10.1145\/2600428.2609610"},{"key":"10.1016\/j.jvcir.2026.104747_b57","first-page":"2083","article-title":"Collective matrix factorization hashing for multimodal data","author":"Ding","year":"2014","journal-title":"Comput. Vis. Pattern Recognit."},{"issue":"1","key":"10.1016\/j.jvcir.2026.104747_b58","first-page":"1","article-title":"CLIP-based fusion-modal reconstructing hashing for large scale unsupervised cross-modal retrieval","volume":"12","author":"Mingyong","year":"2023","journal-title":"Int. J. Multimed. Inf. 
Retr."},{"key":"10.1016\/j.jvcir.2026.104747_b59","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128844","article-title":"Revising similarity relationship hashing for unsupervised cross-modal retrieval","volume":"614","author":"Wu","year":"2025","journal-title":"Neurocomputing"}],"container-title":["Journal of Visual Communication and Image Representation"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1047320326000428?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1047320326000428?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T15:24:52Z","timestamp":1774020292000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1047320326000428"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":59,"alternative-id":["S1047320326000428"],"URL":"https:\/\/doi.org\/10.1016\/j.jvcir.2026.104747","relation":{},"ISSN":["1047-3203"],"issn-type":[{"value":"1047-3203","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Semantic similarity guided contrastive hashing for unsupervised cross-modal retrieval","name":"articletitle","label":"Article Title"},{"value":"Journal of Visual Communication and Image Representation","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.jvcir.2026.104747","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Inc. 
All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"104747"}}