{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T12:51:47Z","timestamp":1774702307327,"version":"3.50.1"},"reference-count":41,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Knowledge-Based Systems"],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1016\/j.knosys.2026.115672","type":"journal-article","created":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T17:02:49Z","timestamp":1772557369000},"page":"115672","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Toward bias-resilient radiology report generation: Hierarchical contrastive learning and adaptive knowledge graph integration"],"prefix":"10.1016","volume":"340","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-2281-0914","authenticated-orcid":false,"given":"Bo","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7919-076X","authenticated-orcid":false,"given":"Deming","family":"Guo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-2410-6651","authenticated-orcid":false,"given":"Feiyang","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5642-3170","authenticated-orcid":false,"given":"Hongda","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8283-5962","authenticated-orcid":false,"given":"Peihong","family":"Teng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8316-6927","authenticated-orcid":false,"given":"Adriano 
Jose","family":"Tavares","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8474-0767","authenticated-orcid":false,"given":"Hao","family":"Xu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.knosys.2026.115672_bib0001","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2023.101817","article-title":"Vision-knowledge fusion model for multi-domain medical report generation","volume":"97","author":"Xu","year":"2023","journal-title":"Information Fusion"},{"key":"10.1016\/j.knosys.2026.115672_bib0002","doi-asserted-by":"crossref","DOI":"10.1016\/j.artmed.2020.101878","article-title":"Deep learning in generating radiology reports: a survey","volume":"106","author":"Monshi","year":"2020","journal-title":"Artif. Intell. Med."},{"issue":"8","key":"10.1016\/j.knosys.2026.115672_bib0003","doi-asserted-by":"crossref","first-page":"634","DOI":"10.1016\/j.jacr.2010.03.016","article-title":"Quality of the written radiology report: a review of the literature","volume":"7","author":"Pool","year":"2010","journal-title":"J. Amer. Coll. Radiol."},{"key":"10.1016\/j.knosys.2026.115672_bib0004","series-title":"International Workshop on Machine Learning in Medical Imaging","first-page":"654","article-title":"Clinically correct report generation from chest x-rays using templates","author":"Pino","year":"2021"},{"key":"10.1016\/j.knosys.2026.115672_bib0005","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"19809","article-title":"Kiut: knowledge-injected u-transformer for radiology report generation","author":"Huang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115672_bib0006","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"3334","article-title":"Dynamic graph enhanced contrastive learning for chest x-ray report generation","author":"Li","year":"2023"},{"key":"10.1016\/j.knosys.2026.115672_bib0007","doi-asserted-by":"crossref","unstructured":"F. Liu, C. Yin, X. Wu, S. Ge, Y. Zou, P. Zhang, X. Sun, Contrastive attention for automatic chest x-ray report generation, (2021). arXiv: 2106.06965.","DOI":"10.18653\/v1\/2021.findings-acl.23"},{"key":"10.1016\/j.knosys.2026.115672_bib0008","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2025.126622","article-title":"HKRG: hierarchical knowledge integration for radiology report generation","volume":"271","author":"Wang","year":"2025","journal-title":"Expert Syst. 
Appl."},{"key":"10.1016\/j.knosys.2026.115672_bib0009","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"13753","article-title":"Exploring and distilling posterior and prior knowledge for radiology report generation","author":"Liu","year":"2021"},{"key":"10.1016\/j.knosys.2026.115672_bib0010","series-title":"Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","article-title":"On the automatic generation of medical imaging reports","author":"Jing","year":"2018"},{"key":"10.1016\/j.knosys.2026.115672_bib0011","series-title":"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)","first-page":"1439","article-title":"Generating radiology reports via memory-driven transformer","author":"Chen","year":"2020"},{"key":"10.1016\/j.knosys.2026.115672_bib0012","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"12910","article-title":"When radiology report generation meets knowledge graph","volume":"34","author":"Zhang","year":"2020"},{"key":"10.1016\/j.knosys.2026.115672_bib0013","series-title":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","article-title":"Competence-based multimodal curriculum learning for medical report generation","author":"Liu","year":"2021"},{"key":"10.1016\/j.knosys.2026.115672_bib0014","unstructured":"A. Dosovitskiy, An image is worth 16x16 words: transformers for image recognition at scale, (2020). arXiv: 2010.11929."},{"issue":"1","key":"10.1016\/j.knosys.2026.115672_bib0015","doi-asserted-by":"crossref","first-page":"4542","DOI":"10.1038\/s41467-023-40260-7","article-title":"Knowledge-enhanced visual-language pre-training on chest radiology images","volume":"14","author":"Zhang","year":"2023","journal-title":"Nat. Commun."},{"issue":"5","key":"10.1016\/j.knosys.2026.115672_bib0016","doi-asserted-by":"crossref","first-page":"829","DOI":"10.1162\/neco_a_01273","article-title":"A survey on deep learning for multimodal data fusion","volume":"32","author":"Gao","year":"2020","journal-title":"Neural. Comput."},{"key":"10.1016\/j.knosys.2026.115672_bib0017","series-title":"Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"2577","article-title":"On the automatic generation of medical imaging reports","author":"Jing","year":"2018"},{"key":"10.1016\/j.knosys.2026.115672_bib0018","unstructured":"H. Tan, M. Bansal, Lxmert: learning cross-modality encoder representations from transformers, (2019). arXiv: 1908.07490."},{"key":"10.1016\/j.knosys.2026.115672_bib0019","unstructured":"Z. Chen, Y. Shen, Y. Song, X. Wan, Cross-modal memory networks for radiology report generation, (2022). 
arXiv: 2204.13258."},{"issue":"4","key":"10.1016\/j.knosys.2026.115672_bib0020","article-title":"RadBERT: adapting transformer-based language models to radiology","volume":"4","author":"Yan","year":"2022","journal-title":"Radiol."},{"key":"10.1016\/j.knosys.2026.115672_bib0021","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2025.103448","article-title":"KAN-Infused social recommendation: a contrastive graph learning approach with bidirectional feature fusion","volume":"125","author":"Liu","year":"2026","journal-title":"Information Fusion"},{"key":"10.1016\/j.knosys.2026.115672_bib0022","doi-asserted-by":"crossref","unstructured":"A. Yan, Z. He, X. Lu, J. Du, E. Chang, A. Gentili, J. McAuley, C.-N. Hsu, Weakly supervised contrastive learning for chest x-ray report generation, (2021). arXiv: 2109.12242.","DOI":"10.18653\/v1\/2021.findings-emnlp.336"},{"key":"10.1016\/j.knosys.2026.115672_bib0023","series-title":"2009 IEEE Conference on Computer Vision and Pattern Recognition","first-page":"248","article-title":"Imagenet: a large-scale hierarchical image database","author":"Deng","year":"2009"},{"key":"10.1016\/j.knosys.2026.115672_bib0024","first-page":"13266","article-title":"Representing long-range context for graph neural networks with global attention","volume":"34","author":"Wu","year":"2021","journal-title":"Adv. Neural Inf. Process Syst."},{"key":"10.1016\/j.knosys.2026.115672_bib0025","unstructured":"M. Seo, A. Kembhavi, A. Farhadi, H. Hajishirzi, Bidirectional attention flow for machine comprehension, (2016). arXiv: 1611.01603."},{"key":"10.1016\/j.knosys.2026.115672_bib0026","unstructured":"S. Jain, A. Agrawal, A. Saporta, S.Q.H. Truong, D.N. Duong, T. Bui, P. Chambon, Y. Zhang, M.P. Lungren, A.Y. Ng, et al., Radgraph: extracting clinical entities and relations from radiology reports, (2021). arXiv: 2106.14463."},{"key":"10.1016\/j.knosys.2026.115672_bib0027","first-page":"94097","article-title":"Perceiving longer sequences with bi-directional cross-attention transformers","volume":"37","author":"Hiller","year":"2025","journal-title":"Adv. Neural Inf. Process Syst."},{"key":"10.1016\/j.knosys.2026.115672_bib0028","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Adv. Neural. Inf. Process Syst."},{"issue":"2","key":"10.1016\/j.knosys.2026.115672_bib0029","doi-asserted-by":"crossref","first-page":"304","DOI":"10.1093\/jamia\/ocv080","article-title":"Preparing a collection of radiology examinations for distribution and retrieval","volume":"23","author":"Demner-Fushman","year":"2016","journal-title":"J. Am. Med. Inform. Assoc."},{"key":"10.1016\/j.knosys.2026.115672_bib0030","unstructured":"A.E.W. Johnson, T.J. Pollard, N.R. Greenbaum, M.P. Lungren, C.-y. Deng, Y. Peng, Z. Lu, R.G. Mark, S.J. Berkowitz, S. Horng, MIMIC-CXR-JPG, a large publicly available database of labeled chest radiographs, (2019). arXiv: 1901.07042."},{"key":"10.1016\/j.knosys.2026.115672_bib0031","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"590","article-title":"Chexpert: a large chest radiograph dataset with uncertainty labels and expert comparison","volume":"33","author":"Irvin","year":"2019"},{"key":"10.1016\/j.knosys.2026.115672_bib0032","unstructured":"Z. Chen, Y. Shen, Y. Song, X. Wan, Cross-modal memory networks for radiology report generation, (2022). 
arXiv: 2204.13258."},{"key":"10.1016\/j.knosys.2026.115672_bib0033","series-title":"International Conference on Machine Learning","first-page":"12888","article-title":"Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"Li","year":"2022"},{"key":"10.1016\/j.knosys.2026.115672_bib0034","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"2982","article-title":"Clinical-bert: vision-language pre-training for radiograph diagnosis and reports generation","volume":"36","author":"Yan","year":"2022"},{"key":"10.1016\/j.knosys.2026.115672_bib0035","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"11558","article-title":"Metransformer: radiology report generation by transformer with multiple learnable expert tokens","author":"Wang","year":"2023"},{"key":"10.1016\/j.knosys.2026.115672_bib0036","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"14194","article-title":"Instance-level expert knowledge and aggregate discriminative attention for radiology report generation","author":"Bu","year":"2024"},{"key":"10.1016\/j.knosys.2026.115672_bib0037","doi-asserted-by":"crossref","first-page":"904","DOI":"10.1109\/TMM.2023.3273390","article-title":"Semi-supervised medical report generation via graph-guided hybrid feature consistency","volume":"26","author":"Zhang","year":"2023","journal-title":"IEEE Trans Multimedia"},{"key":"10.1016\/j.knosys.2026.115672_bib0038","series-title":"International Conference on Medical Image Computing and Computer-Assisted Intervention","first-page":"353","article-title":"ITAdaptor: image-tag","author":"Ding","year":"2025"},{"key":"10.1016\/j.knosys.2026.115672_bib0039","doi-asserted-by":"crossref","DOI":"10.1016\/j.media.2024.103413","article-title":"Dual-modality visual feature flow for medical report generation","volume":"101","author":"Tang","year":"2025","journal-title":"Med. Image. Anal."},{"key":"10.1016\/j.knosys.2026.115672_bib0040","first-page":"18661","article-title":"Supervised contrastive learning","volume":"33","author":"Khosla","year":"2020","journal-title":"Adv. Neural. Inf. 
Process Syst."},{"key":"10.1016\/j.knosys.2026.115672_bib0041","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"10012","article-title":"Swin transformer: hierarchical vision transformer using shifted windows","author":"Liu","year":"2021"}],"container-title":["Knowledge-Based Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126004120?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0950705126004120?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T12:14:25Z","timestamp":1774700065000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0950705126004120"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,5]]},"references-count":41,"alternative-id":["S0950705126004120"],"URL":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115672","relation":{},"ISSN":["0950-7051"],"issn-type":[{"value":"0950-7051","type":"print"}],"subject":[],"published":{"date-parts":[[2026,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Toward bias-resilient radiology report generation: Hierarchical contrastive learning and adaptive knowledge graph integration","name":"articletitle","label":"Article Title"},{"value":"Knowledge-Based Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.knosys.2026.115672","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"115672"}}
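
The record above follows the Crossref REST API "work" schema (such records are typically returned by https://api.crossref.org/works/{DOI}). Below is a minimal sketch, not part of the record itself, of how a record like this could be read in Python; it assumes the JSON has been saved to a hypothetical local file named crossref_work.json and touches only fields visible above (message.DOI, message.title, message.container-title, message.author, message.reference, message.issued).

# Minimal sketch: parse a Crossref "work" record saved as crossref_work.json.
# Filename is an assumption; field names follow the record shown above.
import json

with open("crossref_work.json", encoding="utf-8") as fh:
    record = json.load(fh)

work = record["message"]

# Basic bibliographic fields.
doi = work["DOI"]
title = work["title"][0] if work.get("title") else ""
journal = work["container-title"][0] if work.get("container-title") else ""
issued = "-".join(str(part) for part in work["issued"]["date-parts"][0])

# Author list: Crossref gives given/family names and optional ORCID iDs.
authors = [
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
]

# References: each entry may carry a DOI, an article title, or an
# unstructured citation string; here we collect only the DOIs.
references = work.get("reference", [])
cited_dois = [r["DOI"] for r in references if "DOI" in r]

print(f"{title} ({journal}, {issued})")
print(f"DOI: {doi}")
print(f"Authors: {', '.join(authors)}")
print(f"{len(references)} references, {len(cited_dois)} with DOIs")

Running this against the record above would report the article title, the journal (Knowledge-Based Systems), the May 2026 issue date, the DOI 10.1016/j.knosys.2026.115672, the seven listed authors, and the 41-entry reference list.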