{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,17]],"date-time":"2026-01-17T17:44:23Z","timestamp":1768671863918,"version":"3.49.0"},"reference-count":59,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2022KY0799"],"award-info":[{"award-number":["2022KY0799"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2024KY1715"],"award-info":[{"award-number":["2024KY1715"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2024KY0817"],"award-info":[{"award-number":["2024KY0817"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61966004"],"award-info":[{"award-number":["61966004"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276073"],"award-info":[{"award-number":["62276073"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["International Journal of Approximate Reasoning"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1016\/j.ijar.2025.109383","type":"journal-article","created":{"date-parts":[[2025,2,10]],"date-time":"2025-02-10T19:14:47Z","timestamp":1739214887000},"page":"109383","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":2,"special_numbering":"C","title":["Efficient parameter-free adaptive hashing for large-scale cross-modal retrieval"],"prefix":"10.1016","volume":"180","author":[{"given":"Bo","family":"Li","sequence":"first","affiliation":[]},{"given":"You","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5313-6134","authenticated-orcid":false,"given":"Zhixin","family":"Li","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.ijar.2025.109383_br0010","doi-asserted-by":"crossref","first-page":"188","DOI":"10.1016\/j.ijar.2022.08.013","article-title":"Deep evidential fusion network for medical image classification","volume":"150","author":"Xu","year":"2022","journal-title":"Int. J. Approx. Reason."},{"key":"10.1016\/j.ijar.2025.109383_br0020","series-title":"Proceedings of the 30th ACM International Conference on Multimedia","first-page":"395","article-title":"Image-text matching with fine-grained relational dependency and bidirectional attention-based generative networks","author":"Zhu","year":"2022"},{"key":"10.1016\/j.ijar.2025.109383_br0030","doi-asserted-by":"crossref","first-page":"93","DOI":"10.1016\/j.ijar.2020.12.016","article-title":"A semi-supervised deep learning image caption model based on pseudo label and n-gram","volume":"131","author":"Cheng","year":"2021","journal-title":"Int. J. Approx. Reason."},{"key":"10.1016\/j.ijar.2025.109383_br0040","doi-asserted-by":"crossref","first-page":"10","DOI":"10.1016\/j.ijar.2018.06.003","article-title":"Bsmooth: learning from user feedback to disambiguate query terms in interactive data retrieval","volume":"101","author":"Gon\u00e7alves","year":"2018","journal-title":"Int. J. Approx. Reason."},{"key":"10.1016\/j.ijar.2025.109383_br0050","doi-asserted-by":"crossref","first-page":"366","DOI":"10.1016\/j.neucom.2022.09.037","article-title":"Discrete asymmetric zero-shot hashing with application to cross-modal retrieval","volume":"511","author":"Shu","year":"2022","journal-title":"Neurocomputing"},{"key":"10.1016\/j.ijar.2025.109383_br0060","doi-asserted-by":"crossref","first-page":"276","DOI":"10.1016\/j.neunet.2023.12.018","article-title":"Large-scale cross-modal hashing with unified learning and multi-object regional correlation reasoning","volume":"171","author":"Li","year":"2024","journal-title":"Neural Netw."},{"key":"10.1016\/j.ijar.2025.109383_br0070","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2021.106851","article-title":"Task-adaptive asymmetric deep cross-modal hashing","volume":"219","author":"Li","year":"2021","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.ijar.2025.109383_br0080","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"3142","article-title":"Cross-modal center loss for 3d cross-modal retrieval","author":"Jing","year":"2021"},{"key":"10.1016\/j.ijar.2025.109383_br0090","doi-asserted-by":"crossref","first-page":"298","DOI":"10.1016\/j.ins.2020.08.009","article-title":"Drsl: deep relational similarity learning for cross-modal retrieval","volume":"546","author":"Wang","year":"2021","journal-title":"Inf. Sci."},{"key":"10.1016\/j.ijar.2025.109383_br0100","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2022.109211","article-title":"Semi-supervised cross-modal hashing via modality-specific and cross-modal graph convolutional networks","volume":"136","author":"Wu","year":"2023","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.ijar.2025.109383_br0110","series-title":"Proceedings of the 2024 International Conference on Multimedia Retrieval","first-page":"704","article-title":"Team HUGE: image-text matching via hierarchical and unified graph enhancing","author":"Li","year":"2024"},{"key":"10.1016\/j.ijar.2025.109383_br0120","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"4097","article-title":"Deep binaries: encoding semantic-rich cues for efficient textual-visual cross retrieval","author":"Shen","year":"2017"},{"key":"10.1016\/j.ijar.2025.109383_br0130","series-title":"Proceedings of 2021 IEEE International Conference on Acoustics, Speech and Signal Processing","first-page":"4330","article-title":"Scalable discriminative discrete hashing for large-scale cross-modal retrieval","author":"Qin","year":"2021"},{"issue":"2","key":"10.1016\/j.ijar.2025.109383_br0140","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3383184","article-title":"Dual-path convolutional image-text embedding with instance loss","volume":"16","author":"Zheng","year":"2020","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"10.1016\/j.ijar.2025.109383_br0150","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.121516","article-title":"Similarity graph-correlation reconstruction network for unsupervised cross-modal hashing","volume":"237","author":"Yao","year":"2024","journal-title":"Expert Syst. Appl."},{"key":"10.1016\/j.ijar.2025.109383_br0160","series-title":"Proceedings of the 18th ACM International Conference on Multimedia","first-page":"251","article-title":"A new approach to cross-modal multimedia retrieval","author":"Rasiwasia","year":"2010"},{"key":"10.1016\/j.ijar.2025.109383_br0170","series-title":"Proceedings of the 37th International ACM SIGIR Conference on Research & Development in Information Retrieval","first-page":"415","article-title":"Latent semantic sparse hashing for cross-modal similarity search","author":"Zhou","year":"2014"},{"key":"10.1016\/j.ijar.2025.109383_br0180","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"2075","article-title":"Collective matrix factorization hashing for multimodal data","author":"Ding","year":"2014"},{"key":"10.1016\/j.ijar.2025.109383_br0190","series-title":"Proceedings of International Joint Conference on Artificial Intelligence","first-page":"3890","article-title":"Semantic topic multimodal hashing for cross-media retrieval","author":"Wang","year":"2015"},{"issue":"5","key":"10.1016\/j.ijar.2025.109383_br0200","doi-asserted-by":"crossref","first-page":"2494","DOI":"10.1109\/TIP.2017.2676345","article-title":"Learning discriminative binary codes for large-scale cross-modal retrieval","volume":"26","author":"Xu","year":"2017","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.ijar.2025.109383_br0210","series-title":"Proceedings of the 22nd International Joint Conference on Artificial Intelligence","first-page":"1360","article-title":"Learning hash functions for cross-view similarity search","author":"Kumar","year":"2011"},{"key":"10.1016\/j.ijar.2025.109383_br0220","series-title":"Proceedings of the 2013 ACM SIGMOD International Conference on Management of Data","first-page":"785","article-title":"Inter-media hashing for large-scale retrieval from heterogeneous data sources","author":"Song","year":"2013"},{"key":"10.1016\/j.ijar.2025.109383_br0230","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128844","article-title":"Revising similarity relationship hashing for unsupervised cross-modal retrieval","volume":"614","author":"Wu","year":"2025","journal-title":"Neurocomputing"},{"issue":"3","key":"10.1016\/j.ijar.2025.109383_br0240","first-page":"3877","article-title":"Unsupervised contrastive cross-modal hashing","volume":"45","author":"Hu","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.ijar.2025.109383_br0250","doi-asserted-by":"crossref","DOI":"10.1016\/j.displa.2023.102489","article-title":"Rich: a rapid method for image-text cross-modal hash retrieval","volume":"79","author":"Li","year":"2023","journal-title":"Displays"},{"key":"10.1016\/j.ijar.2025.109383_br0260","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"3232","article-title":"Deep cross-modal hashing","author":"Jiang","year":"2017"},{"key":"10.1016\/j.ijar.2025.109383_br0270","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"4077","article-title":"Cross-modal deep variational hashing","author":"Liong","year":"2017"},{"key":"10.1016\/j.ijar.2025.109383_br0280","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"4242","article-title":"Self-supervised adversarial hashing networks for cross-modal retrieval","author":"Li","year":"2018"},{"key":"10.1016\/j.ijar.2025.109383_br0290","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"3027","article-title":"Deep joint-semantics reconstructing hashing for large-scale unsupervised cross-modal retrieval","author":"Su","year":"2019"},{"key":"10.1016\/j.ijar.2025.109383_br0300","series-title":"Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"1379","article-title":"Joint-modal distribution-based similarity hashing for large-scale unsupervised deep cross-modal retrieval","author":"Liu","year":"2020"},{"key":"10.1016\/j.ijar.2025.109383_br0310","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"4626","article-title":"Deep graph-neighbor coherence preserving network for unsupervised cross-modal hashing","author":"Yu","year":"2021"},{"issue":"3","key":"10.1016\/j.ijar.2025.109383_br0320","doi-asserted-by":"crossref","first-page":"964","DOI":"10.1109\/TPAMI.2019.2940446","article-title":"Mtfh: a matrix tri-factorization hashing framework for efficient cross-modal retrieval","volume":"43","author":"Liu","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"2","key":"10.1016\/j.ijar.2025.109383_br0330","doi-asserted-by":"crossref","first-page":"560","DOI":"10.1109\/TKDE.2020.2987312","article-title":"Deep cross-modal hashing with hashing functions and unified hash codes jointly learning","volume":"34","author":"Tu","year":"2020","journal-title":"IEEE Trans. Knowl. Data Eng."},{"issue":"7","key":"10.1016\/j.ijar.2025.109383_br0340","first-page":"6798","article-title":"Deep cross-modal proxy hashing","volume":"35","author":"Tu","year":"2023","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"10.1016\/j.ijar.2025.109383_br0350","series-title":"Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"686","article-title":"Data-aware proxy hashing for cross-modal retrieval","author":"Tu","year":"2023"},{"key":"10.1016\/j.ijar.2025.109383_br0360","doi-asserted-by":"crossref","first-page":"824","DOI":"10.1109\/TMM.2023.3272169","article-title":"Hierarchical consensus hashing for cross-modal retrieval","volume":"26","author":"Sun","year":"2023","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.ijar.2025.109383_br0370","series-title":"Proceedings of the 32nd ACM International Conference on Multimedia","first-page":"5623","article-title":"Distribution consistency guided hashing for cross-modal retrieval","author":"Sun","year":"2024"},{"key":"10.1016\/j.ijar.2025.109383_br0380","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"10638","article-title":"Fine-grained video-text retrieval with hierarchical graph reasoning","author":"Chen","year":"2020"},{"key":"10.1016\/j.ijar.2025.109383_br0390","doi-asserted-by":"crossref","first-page":"9962","DOI":"10.1109\/TMM.2024.3402613","article-title":"Mac: masked contrastive pre-training for efficient video-text retrieval","volume":"26","author":"Shu","year":"2024","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.ijar.2025.109383_br0400","series-title":"European Conference on Computer Vision","first-page":"313","article-title":"Kdpror: a knowledge-decoupling probabilistic framework for video-text retrieval","author":"Zhuang","year":"2025"},{"key":"10.1016\/j.ijar.2025.109383_br0410","series-title":"Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing","first-page":"4793","article-title":"Audio-text retrieval in context","author":"Lou","year":"2022"},{"key":"10.1016\/j.ijar.2025.109383_br0420","series-title":"Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing","first-page":"1","article-title":"Improving text-audio retrieval by text-aware attention pooling and prior matrix revised loss","author":"Xin","year":"2023"},{"issue":"6","key":"10.1016\/j.ijar.2025.109383_br0430","doi-asserted-by":"crossref","first-page":"1406","DOI":"10.1109\/TCSVT.2017.2667710","article-title":"Large-scale video retrieval using image queries","volume":"28","author":"Araujo","year":"2017","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"3s","key":"10.1016\/j.ijar.2025.109383_br0440","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3575658","article-title":"Variational autoencoder with cca for audio\u2013visual cross-modal retrieval","volume":"19","author":"Zhang","year":"2023","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"10.1016\/j.ijar.2025.109383_br0450","series-title":"2022 IEEE International Conference on Multimedia and Expo","first-page":"1","article-title":"Listen and look: multi-modal aggregation and co-attention network for video-audio retrieval","author":"Hao","year":"2022"},{"key":"10.1016\/j.ijar.2025.109383_br0460","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"11610","article-title":"Rono: robust discriminative learning with noisy labels for 2d-3d cross-modal retrieval","author":"Feng","year":"2023"},{"key":"10.1016\/j.ijar.2025.109383_br0470","author":"Kiros"},{"key":"10.1016\/j.ijar.2025.109383_br0480","series-title":"Proceedings of the British Machine Vision Conference","first-page":"1","article-title":"Vse++: improving visual-semantic embeddings with hard negatives","author":"Faghri","year":"2018"},{"key":"10.1016\/j.ijar.2025.109383_br0490","doi-asserted-by":"crossref","first-page":"1270","DOI":"10.1109\/LSP.2020.3008335","article-title":"Efficient parameter-free adaptive multi-modal hashing","volume":"27","author":"Zheng","year":"2020","journal-title":"IEEE Signal Process. Lett."},{"key":"10.1016\/j.ijar.2025.109383_br0500","series-title":"Proceedings of the International Conference on Machine Learning","first-page":"2250","article-title":"Parameter-free, dynamic, and strongly-adaptive online learning","author":"Cutkosky","year":"2020"},{"issue":"6","key":"10.1016\/j.ijar.2025.109383_br0510","doi-asserted-by":"crossref","first-page":"84","DOI":"10.1145\/3065386","article-title":"Imagenet classification with deep convolutional neural networks","volume":"60","author":"Krizhevsky","year":"2017","journal-title":"Commun. ACM"},{"key":"10.1016\/j.ijar.2025.109383_br0520","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"37","article-title":"Supervised discrete hashing","author":"Shen","year":"2015"},{"key":"10.1016\/j.ijar.2025.109383_br0530","series-title":"Proceedings of the ACM International Conference on Image and Video Retrieval","first-page":"1","article-title":"Nus-wide: a real-world web image database from national university of Singapore","author":"Chua","year":"2009"},{"issue":"4","key":"10.1016\/j.ijar.2025.109383_br0540","doi-asserted-by":"crossref","first-page":"419","DOI":"10.1016\/j.cviu.2009.03.008","article-title":"The segmented and annotated iapr tc-12 benchmark","volume":"114","author":"Escalante","year":"2010","journal-title":"Comput. Vis. Image Underst."},{"key":"10.1016\/j.ijar.2025.109383_br0550","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"5288","article-title":"Msr-vtt: a large video description dataset for bridging video and language","author":"Xu","year":"2016"},{"key":"10.1016\/j.ijar.2025.109383_br0560","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.ijar.2025.109383_br0570","series-title":"Proceedings of the Conference on Empirical Methods in Natural Language Processing","first-page":"1532","article-title":"Glove: global vectors for word representation","author":"Pennington","year":"2014"},{"key":"10.1016\/j.ijar.2025.109383_br0580","author":"Kingma"},{"issue":"7","key":"10.1016\/j.ijar.2025.109383_br0590","first-page":"2121","article-title":"Adaptive subgradient methods for online learning and stochastic optimization","volume":"12","author":"Duchi","year":"2011","journal-title":"J. Mach. Learn. Res."}],"container-title":["International Journal of Approximate Reasoning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0888613X25000246?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0888613X25000246?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,10,4]],"date-time":"2025-10-04T20:45:36Z","timestamp":1759610736000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0888613X25000246"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5]]},"references-count":59,"alternative-id":["S0888613X25000246"],"URL":"https:\/\/doi.org\/10.1016\/j.ijar.2025.109383","relation":{},"ISSN":["0888-613X"],"issn-type":[{"value":"0888-613X","type":"print"}],"subject":[],"published":{"date-parts":[[2025,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Efficient parameter-free adaptive hashing for large-scale cross-modal retrieval","name":"articletitle","label":"Article Title"},{"value":"International Journal of Approximate Reasoning","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.ijar.2025.109383","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier Inc. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"109383"}}