{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T16:35:50Z","timestamp":1772037350941,"version":"3.50.1"},"reference-count":53,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100004607","name":"Natural Science Foundation of Guangxi Province","doi-asserted-by":"publisher","award":["2019GXNSFDA245018"],"award-info":[{"award-number":["2019GXNSFDA245018"]}],"id":[{"id":"10.13039\/501100004607","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276073"],"award-info":[{"award-number":["62276073"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61966004"],"award-info":[{"award-number":["61966004"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.neucom.2026.132767","type":"journal-article","created":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T07:42:12Z","timestamp":1768894932000},"page":"132767","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Bi-directional similarity enhancement and adjustment hashing for unsupervised cross-modal retrieval"],"prefix":"10.1016","volume":"672","author":[{"given":"Dan","family":"Yao","sequence":"first","affiliation":[]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5313-6134","authenticated-orcid":false,"given":"Zhixin","family":"Li","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neucom.2026.132767_bib0005","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"19883","article-title":"Bicro: noisy correspondence rectification for multi-modality data via bi-directional cross-modal similarity consistency","author":"Yang","year":"2023"},{"key":"10.1016\/j.neucom.2026.132767_bib0010","series-title":"Proceedings of the European Conference on Computer Vision","first-page":"700","article-title":"Coder: coupled diversity-sensitive momentum contrastive learning for image-text retrieval","author":"Wang","year":"2022"},{"key":"10.1016\/j.neucom.2026.132767_bib0015","series-title":"Proceedings of the Winter Conference on Applications of Computer Vision","first-page":"1022","article-title":"Cross-modal semantic enhanced interaction for image-sentence retrieval","author":"Ge","year":"2023"},{"key":"10.1016\/j.neucom.2026.132767_bib0020","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2024.108005","article-title":"Cross-modal semantic interference suppression for image-text matching","volume":"133","author":"Yao","year":"2024","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.132767_bib0025","series-title":"Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"1252","article-title":"Learnable pillar-based re-ranking for image-text retrieval","author":"Qu","year":"2023"},{"key":"10.1016\/j.neucom.2026.132767_bib0030","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2023.105923","article-title":"Cross-modal information balance-aware reasoning network for image-text retrieval","volume":"120","author":"Qin","year":"2023","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.132767_bib0035","series-title":"Proceedings of the Conference on Artificial Intelligence","first-page":"3262","article-title":"Show your faith: cross-modal confidence-aware network for image-text matching","author":"Zhang","year":"2022"},{"key":"10.1016\/j.neucom.2026.132767_bib0040","doi-asserted-by":"crossref","first-page":"1320","DOI":"10.1109\/TMM.2022.3141603","article-title":"Unified adaptive relevance distinguishable attention network for image-text matching","volume":"25","author":"Zhang","year":"2022","journal-title":"IEEE Trans. Multimed."},{"issue":"1","key":"10.1016\/j.neucom.2026.132767_bib0045","doi-asserted-by":"crossref","DOI":"10.1016\/j.ipm.2022.103154","article-title":"Unifying knowledge iterative dissemination and relational reconstruction network for image\u2013text matching","volume":"60","author":"Xie","year":"2023","journal-title":"Inf. Process. Manag."},{"key":"10.1016\/j.neucom.2026.132767_bib0050","series-title":"Proceedings of the ACM International Conference on Multimedia","first-page":"893","article-title":"Multi-granularity interactive transformer hashing for cross-modal retrieval","author":"Liu","year":"2023"},{"issue":"3","key":"10.1016\/j.neucom.2026.132767_bib0055","doi-asserted-by":"crossref","first-page":"3018","DOI":"10.1109\/TITS.2022.3221787","article-title":"Cross-modal generation and pair correlation alignment hashing","volume":"24","author":"Ou","year":"2022","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"issue":"10","key":"10.1016\/j.neucom.2026.132767_bib0060","doi-asserted-by":"crossref","first-page":"7255","DOI":"10.1109\/TCSVT.2022.3172716","article-title":"Deep adaptively-enhanced hashing with discriminative similarity guidance for unsupervised cross-modal retrieval","volume":"32","author":"Shi","year":"2022","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.neucom.2026.132767_bib0065","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2023.106473","article-title":"Label embedding asymmetric discrete hashing for efficient cross-modal retrieval","volume":"123","author":"Yang","year":"2023","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.132767_bib0070","series-title":"Proceedings of the IEEE International Conference on Multimedia and Expo","first-page":"1","article-title":"Attention-guided semantic hashing for unsupervised cross-modal retrieval","author":"Shen","year":"2021"},{"key":"10.1016\/j.neucom.2026.132767_bib0075","doi-asserted-by":"crossref","first-page":"276","DOI":"10.1016\/j.neunet.2023.12.018","article-title":"Large-scale cross-modal hashing with unified learning and multi-object regional correlation reasoning","volume":"171","author":"Li","year":"2023","journal-title":"Neural Networks"},{"key":"10.1016\/j.neucom.2026.132767_bib0080","doi-asserted-by":"crossref","DOI":"10.1016\/j.displa.2023.102489","article-title":"Rich: a rapid method for image-text cross-modal hash retrieval","volume":"79","author":"Li","year":"2023","journal-title":"Displays"},{"issue":"11","key":"10.1016\/j.neucom.2026.132767_bib0085","doi-asserted-by":"crossref","first-page":"6306","DOI":"10.1109\/TNNLS.2021.3076684","article-title":"Fddh: fast discriminative discrete hashing for large-scale cross-modal retrieval","volume":"33","author":"Liu","year":"2021","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.neucom.2026.132767_bib0090","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2021.107252","article-title":"Learning a maximized shared latent factor for cross-modal hashing","volume":"228","author":"Wang","year":"2021","journal-title":"Knowl. Based Syst."},{"issue":"11","key":"10.1016\/j.neucom.2026.132767_bib0095","doi-asserted-by":"crossref","first-page":"11780","DOI":"10.1109\/TCYB.2021.3081615","article-title":"Average approximate hashing-based double projections learning for cross-modal retrieval","volume":"52","author":"Fang","year":"2021","journal-title":"IEEE Trans. Cybern."},{"key":"10.1016\/j.neucom.2026.132767_bib0100","doi-asserted-by":"crossref","DOI":"10.1016\/j.image.2020.116131","article-title":"Multi-label semantics preserving based deep cross-modal hashing","volume":"93","author":"Zou","year":"2021","journal-title":"Signal Process. Image Commun."},{"key":"10.1016\/j.neucom.2026.132767_bib0105","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2024.108969","article-title":"Unsupervised cross-modal hashing retrieval via dynamic contrast and optimization","volume":"136","author":"Xie","year":"2024","journal-title":"Eng. Appl. Artif. Intell."},{"issue":"2","key":"10.1016\/j.neucom.2026.132767_bib0110","doi-asserted-by":"crossref","first-page":"560","DOI":"10.1109\/TKDE.2020.2987312","article-title":"Deep cross-modal hashing with hashing functions and unified hash codes jointly learning","volume":"34","author":"Tu","year":"2020","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.neucom.2026.132767_bib0115","first-page":"6798","article-title":"Deep cross-modal proxy hashing","volume":"35","author":"Tu","year":"2022","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.neucom.2026.132767_bib0120","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"3027","article-title":"Deep joint-semantics reconstructing hashing for large-scale unsupervised cross-modal retrieval","author":"Su","year":"2019"},{"key":"10.1016\/j.neucom.2026.132767_bib0125","series-title":"Proceedings of the International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"1379","article-title":"Joint-modal distribution-based similarity hashing for large-scale unsupervised deep cross-modal retrieval","author":"Liu","year":"2020"},{"key":"10.1016\/j.neucom.2026.132767_bib0130","series-title":"Proceedings of the International Conference on Multimedia Retrieval","first-page":"44","article-title":"Deep semantic-alignment hashing for unsupervised cross-modal retrieval","author":"Yang","year":"2020"},{"issue":"3","key":"10.1016\/j.neucom.2026.132767_bib0135","first-page":"3877","article-title":"Unsupervised contrastive cross-modal hashing","volume":"45","author":"Hu","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.neucom.2026.132767_bib0140","doi-asserted-by":"crossref","first-page":"563","DOI":"10.1007\/s11280-020-00859-y","article-title":"High-order nonlocal hashing for unsupervised cross-modal retrieval","volume":"24","author":"Zhang","year":"2021","journal-title":"World Wide Web"},{"key":"10.1016\/j.neucom.2026.132767_bib0145","author":"Yu"},{"key":"10.1016\/j.neucom.2026.132767_bib0150","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.109891","article-title":"Multiple instance relation graph reasoning for cross-modal hash retrieval","volume":"256","author":"Hou","year":"2022","journal-title":"Knowl. Based Syst."},{"key":"10.1016\/j.neucom.2026.132767_bib0155","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.121516","article-title":"Similarity graph-correlation reconstruction network for unsupervised cross-modal hashing","volume":"237","author":"Yao","year":"2024","journal-title":"Expert Syst. Appl."},{"key":"10.1016\/j.neucom.2026.132767_bib0160","doi-asserted-by":"crossref","first-page":"3476","DOI":"10.1109\/TMM.2025.3535378","article-title":"Ensemble prototype networks for unsupervised cross-modal hashing with cross-task consistency","volume":"27","author":"Liu","year":"2025","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.neucom.2026.132767_bib0165","series-title":"Proceedings of the International Conference on Multimedia Retrieval","first-page":"499","article-title":"Learning from expert: vision-language knowledge distillation for unsupervised cross-modal hashing retrieval","author":"Sun","year":"2023"},{"key":"10.1016\/j.neucom.2026.132767_bib0170","series-title":"Proceedings of the International Joint Conference on Artificial Intelligence","first-page":"853","article-title":"Set and rebase: determining the semantic graph connectivity for unsupervised cross-modal hashing","author":"Wang","year":"2021"},{"key":"10.1016\/j.neucom.2026.132767_bib0175","doi-asserted-by":"crossref","first-page":"466","DOI":"10.1109\/TMM.2021.3053766","article-title":"Aggregation-based graph convolutional hashing for unsupervised cross-modal retrieval","volume":"24","author":"Zhang","year":"2021","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.neucom.2026.132767_bib0180","first-page":"84","article-title":"Imagenet classification with deep convolutional neural networks","volume":"60","author":"Krizhevsky","year":"2012","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.neucom.2026.132767_bib0185","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2021.108084","article-title":"Deep robust multilevel semantic hashing for multi-label cross-modal retrieval","volume":"120","author":"Song","year":"2021","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.neucom.2026.132767_bib0190","doi-asserted-by":"crossref","first-page":"152","DOI":"10.1016\/j.neucom.2021.06.087","article-title":"Clustering-driven deep adversarial hashing for scalable unsupervised cross-modal retrieval","volume":"459","author":"Shen","year":"2021","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.132767_bib0195","doi-asserted-by":"crossref","DOI":"10.1016\/j.compeleceng.2021.107262","article-title":"Self-attention and adversary learning deep hashing network for cross-modal retrieval","volume":"93","author":"Chen","year":"2021","journal-title":"Comput. Electr. Eng."},{"key":"10.1016\/j.neucom.2026.132767_bib0200","doi-asserted-by":"crossref","first-page":"3023","DOI":"10.1007\/s13042-021-01395-5","article-title":"Discrete matrix factorization hashing for cross-modal retrieval","volume":"12","author":"Fang","year":"2021","journal-title":"Int. J. Mach. Learn. Cybern."},{"issue":"2","key":"10.1016\/j.neucom.2026.132767_bib0205","first-page":"1185","article-title":"Adaptive label correlation based asymmetric discrete hashing for cross-modal retrieval","volume":"35","author":"Li","year":"2021","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.neucom.2026.132767_bib0210","doi-asserted-by":"crossref","DOI":"10.1016\/j.jvcir.2021.103256","article-title":"Robust multimodal discrete hashing for cross-modal similarity search","volume":"79","author":"Fang","year":"2021","journal-title":"J. Vis. Commun. Image Represent."},{"key":"10.1016\/j.neucom.2026.132767_bib0215","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"5608","article-title":"Hashnet: deep learning to hash by continuation","author":"Cao","year":"2017"},{"key":"10.1016\/j.neucom.2026.132767_bib0220","series-title":"Proceedings of the ACM International Conference on Multimedia","first-page":"1398","article-title":"Deep binary reconstruction for cross-modal hashing","author":"Li","year":"2017"},{"issue":"6","key":"10.1016\/j.neucom.2026.132767_bib0225","doi-asserted-by":"crossref","DOI":"10.1016\/j.ipm.2020.102374","article-title":"Semantic-rebased cross-modal hashing for scalable unsupervised text-visual retrieval","volume":"57","author":"Wang","year":"2020","journal-title":"Inf. Process. Manag."},{"key":"10.1016\/j.neucom.2026.132767_bib0230","series-title":"Proceedings of the ACM International Conference on Multimedia","first-page":"3712","article-title":"Adaptive structural similarity preserving for unsupervised cross modal hashing","author":"Li","year":"2022"},{"key":"10.1016\/j.neucom.2026.132767_bib0235","series-title":"Proceedings of the ACM International Conference on Image and Video Retrieval","first-page":"1","article-title":"Nus-wide: a real-world web image database from national university of Singapore","author":"Chua","year":"2009"},{"key":"10.1016\/j.neucom.2026.132767_bib0240","series-title":"Proceedings of the ACM International Conference on Multimedia","first-page":"39","article-title":"The MIR flickr retrieval evaluation","author":"Huiskes","year":"2008"},{"key":"10.1016\/j.neucom.2026.132767_bib0245","series-title":"Proceedings of the International Joint Conference on Artificial Intelligence","first-page":"52854","article-title":"Unsupervised deep hashing via binary latent factor models for large-scale cross-modal retrieval","author":"Wu","year":"2018"},{"key":"10.1016\/j.neucom.2026.132767_bib0250","doi-asserted-by":"crossref","first-page":"453","DOI":"10.7717\/peerj-cs.552","article-title":"Hierarchical semantic interaction-based deep hashing network for cross-modal retrieval","volume":"7","author":"Chen","year":"2021","journal-title":"PeerJ Comput. Sci."},{"key":"10.1016\/j.neucom.2026.132767_bib0255","doi-asserted-by":"crossref","first-page":"5927","DOI":"10.1007\/s10489-020-02137-w","article-title":"Multi-attention based semantic deep hashing for cross-modal retrieval","volume":"51","author":"Zhu","year":"2021","journal-title":"Applied Intelligence"},{"key":"10.1016\/j.neucom.2026.132767_bib0260","series-title":"Proceedings of the ACM International Conference on Multimedia","first-page":"453","article-title":"Differentiable cross-modal hashing via multimodal transformers","author":"Tu","year":"2022"},{"issue":"2","key":"10.1016\/j.neucom.2026.132767_bib0265","doi-asserted-by":"crossref","first-page":"1603","DOI":"10.1007\/s40747-021-00615-3","article-title":"Semantic-guided autoencoder adversarial hashing for large-scale cross-modal retrieval","volume":"8","author":"Li","year":"2022","journal-title":"Complex Intell. Syst."}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226001645?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226001645?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T15:42:52Z","timestamp":1772034172000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231226001645"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":53,"alternative-id":["S0925231226001645"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2026.132767","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Bi-directional similarity enhancement and adjustment hashing for unsupervised cross-modal retrieval","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2026.132767","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"132767"}}