{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T09:49:44Z","timestamp":1767260984400,"version":"build-2065373602"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2025,1,31]],"date-time":"2025-01-31T00:00:00Z","timestamp":1738281600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,31]],"date-time":"2025-01-31T00:00:00Z","timestamp":1738281600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No. 81860318"],"award-info":[{"award-number":["No. 81860318"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007301","name":"Kunming University of Science and Technology","doi-asserted-by":"publisher","award":["No. KKZ3202203020"],"award-info":[{"award-number":["No. KKZ3202203020"]}],"id":[{"id":"10.13039\/501100007301","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Digit Imaging. Inform. 
med."],"DOI":"10.1007\/s10278-025-01422-9","type":"journal-article","created":{"date-parts":[[2025,1,31]],"date-time":"2025-01-31T11:32:04Z","timestamp":1738323124000},"page":"2646-2663","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Denoising Multi-Level Cross-Attention and Contrastive Learning for Chest Radiology Report Generation"],"prefix":"10.1007","volume":"38","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-4183-9779","authenticated-orcid":false,"given":"Deng","family":"Zhu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4543-0111","authenticated-orcid":false,"given":"Lijun","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xiaobing","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Li","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Peng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,31]]},"reference":[{"key":"1422_CR1","doi-asserted-by":"crossref","unstructured":"Jing B, Xie P, Xing E: On the automatic generation of medical imaging reports. In Association for Computational Linguistics (ACL), Melbourne: Australia, 1:2577\u20132586, 2018","DOI":"10.18653\/v1\/P18-1240"},{"key":"1422_CR2","unstructured":"Li Y, Liang X, Hu Z, Xing E: Hybrid Retrieval-Generation Reinforced Agent for Medical Image Report Generation. In Neural information processing systems foundation, Montreal: Canada, 31, 2018"},{"key":"1422_CR3","doi-asserted-by":"crossref","unstructured":"Wang Z, Zhou L, Wang L, Li X: A self-boosting framework for automated radiographic report generation. 
In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Nashville: USA, 2433\u20132442, 2021","DOI":"10.1109\/CVPR46437.2021.00246"},{"issue":"6","key":"1422_CR4","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky A, Sutskever I, Hinton GE: ImageNet classification with deep convolutional neural networks. Commun ACM, 60(6):84-90, 2017","journal-title":"Commun ACM"},{"key":"1422_CR5","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, Uszkoreit J, Houlsby N: AN IMAGE IS WORTH 16X16 WORDS: TRANSFORMERS FOR IMAGE RECOGNITION AT SCALE. In 9th International Conference on Learning Representations, ICLR 2021, May 3, 2021- May 7, 2021, Virtual: Online, 2021"},{"key":"1422_CR6","doi-asserted-by":"crossref","unstructured":"Liang X, Hu Z, Zhang H, Gan C, Xing E: Recurrent Topic-Transition GAN for Visual Paragraph Generation. In Proceedings of the IEEE international conference on computer vision, Venice: Italy, 3362\u20133371, 2017","DOI":"10.1109\/ICCV.2017.364"},{"key":"1422_CR7","doi-asserted-by":"crossref","unstructured":"Gajbhiye G, Nandedkar A, Faye I: Automatic report generation for chest X-Ray images: A multilevel multi-attention approach. In Computer Vision and Image Processing: 4th International Conference, CVIP 2019, Jaipur: India, 174\u2013182, 2020","DOI":"10.1007\/978-981-15-4015-8_15"},{"key":"1422_CR8","doi-asserted-by":"crossref","unstructured":"Zhang Z, Xie Y, Xing F, McGough M, Yang L: MDNet: A semantically and visually interpretable medical image diagnosis network. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, Honolulu: United states, 6428\u20136436, 2017","DOI":"10.1109\/CVPR.2017.378"},{"key":"1422_CR9","doi-asserted-by":"crossref","unstructured":"Jing B, Wang Z, Xing E: Show, describe and conclude: On exploiting the structure information of chest X-ray reports. In 57th Annual Meeting of the Association for Computational Linguistics, ACL 2019, July 28, 2019 - August 2, 2019, Florence: Italy, 6570\u20136580, 2019","DOI":"10.18653\/v1\/P19-1657"},{"key":"1422_CR10","doi-asserted-by":"crossref","unstructured":"Wang Z, Tang M, Wang L, Li X, Zhou L: A medical semantic-assisted transformer for radiographic report generation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, Cham: Springer Nature Switzerland, 655\u2013664, 2022","DOI":"10.1007\/978-3-031-16437-8_63"},{"key":"1422_CR11","doi-asserted-by":"crossref","unstructured":"Gale W, Oakden-Rayner L, Carneiro G, Palmer L, Bradley A: Producing radiologist-quality reports for interpretable deep learning. In IEEE Computer Society, Venice: Italy, 1275\u20131279, 2019","DOI":"10.1109\/ISBI.2019.8759236"},{"key":"1422_CR12","doi-asserted-by":"crossref","unstructured":"Zhang Y, Wang X, Xu Z, Yu Q, Yuille A, Xu D: When radiology report generation meets knowledge graph. In Proceedings of the AAAI conference on artificial intelligence, New York: NY, 34(07):12910-12917, 2020","DOI":"10.1609\/aaai.v34i07.6989"},{"key":"1422_CR13","doi-asserted-by":"crossref","unstructured":"Nooralahzadeh F, Gonzalez N, Frauenfelder T, Fujimoto K, Krauthammer M: Progressive transformer-based generation of radiology reports. 
In 2021 Findings of the Association for Computational Linguistics, Findings of ACL: EMNLP 2021, November 7, 2021 - November 11, 2021, Punta Cana: Dominican republic, 2824-2832, 2021","DOI":"10.18653\/v1\/2021.findings-emnlp.241"},{"key":"1422_CR14","doi-asserted-by":"crossref","unstructured":"Aksoy N, Ravikumar N, Frangi A: Radiology report generation using transformers conditioned with non-imaging data. In Medical Imaging 2023: Imaging Informatics for Healthcare, San Diego: United states, 86:146-153, 2023","DOI":"10.1117\/12.2653672"},{"key":"1422_CR15","unstructured":"Wang ZH, Li ML, Xu RC, Zhou L, Lei J, Lin XD, Wang SH, Yang Z, Zhu CG, Hoiem D, Chang S-F, Bansal M, Ji H: Language models with image descriptors are strong few-shot video-language learners. In 36th Conference on Neural Information Processing Systems, NeurIPS 2022, November 28, 2022 - December 9, 2022, New Orleans, LA: United states, 35:8483\u20138497, 2022"},{"key":"1422_CR16","doi-asserted-by":"crossref","unstructured":"Balntas V, Riba E, Ponsa D, Mikolajczyk K: Learning local feature descriptors with triplets and shallow convolutional neural networks. In BMVC, York: United kingdom, 119:1-9, 2016","DOI":"10.5244\/C.30.119"},{"key":"1422_CR17","doi-asserted-by":"publisher","first-page":"121260","DOI":"10.1016\/j.eswa.2023.121260","volume":"237","author":"Y Xue","year":"2024","unstructured":"Xue Y, Tan Y, Tan L, Qin J, Xiang X: Generating radiology reports via auxiliary signal guidance and a memory-driven network. Expert Syst Appl, 237:121260, 2024","journal-title":"Expert Syst Appl"},{"key":"1422_CR18","doi-asserted-by":"crossref","unstructured":"Li M, Lin B, Chen Z, Lin H, Liang X, Chang X: Dynamic graph enhanced contrastive learning for chest x-ray report generation. 
In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Vancouver: Canada, 3334\u20133343, 2023","DOI":"10.1109\/CVPR52729.2023.00325"},{"key":"1422_CR19","doi-asserted-by":"publisher","first-page":"100557","DOI":"10.1016\/j.imu.2021.100557","volume":"24","author":"O Alfarghaly","year":"2021","unstructured":"Alfarghaly O, Khaled R, Elkorany A, Helal M, Fahmy A: Automated radiology report generation using conditioned transformers. Informatics in Medicine Unlocked, 24:100557, 2021","journal-title":"Informatics in Medicine Unlocked"},{"key":"1422_CR20","unstructured":"Brown TB, Mann B, Ryder N, Subbiah M, Kaplan J, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A, Agarwal S, Herbert-Voss A, Krueger G, Henighan T, Child R, Ramesh A, Ziegler DM, Wu J, Winter C, Hesse C, Chen M, Sigler E, Litwin M, Gray S, Chess B, Clark J, Berner C, McCandlish S, Radford A, Sutskever I, Amodei D: Language models are few-shot learners. In 34th Conference on Neural Information Processing Systems, NeurIPS 2020, December 6, 2020 - December 12, 2020, Virtual: Online, 1877\u20131901, 2020"},{"key":"1422_CR21","doi-asserted-by":"crossref","unstructured":"Vinyals O, Toshev A, Bengio S, Erhan D: Show and tell: A neural image caption generator. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston: United states, 3156\u20133164, 2015","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"1422_CR22","doi-asserted-by":"crossref","unstructured":"Chen Z, Song Y, Chang T, Wan X: Generating radiology reports via memory-driven transformer. 
In 2020 Conference on Empirical Methods in Natural Language Processing, EMNLP 2020, November 16, 2020 - November 20, 2020, Virtual: Online, 1439\u20131449, 2020","DOI":"10.18653\/v1\/2020.emnlp-main.112"},{"key":"1422_CR23","doi-asserted-by":"publisher","first-page":"102798","DOI":"10.1016\/j.media.2023.102798","volume":"86","author":"S Yang","year":"2023","unstructured":"Yang S, Wu X, Ge S, Zheng Z, Zhou SK, Xiao L: Radiology report generation with a learned knowledge base and multi-modal alignment. Med Image Anal, 86:102798, 2023","journal-title":"Med Image Anal"},{"key":"1422_CR24","doi-asserted-by":"publisher","first-page":"104974","DOI":"10.1016\/j.imavis.2024.104974","volume":"144","author":"C Bai","year":"2024","unstructured":"Bai C, Han X: MRFormer: Multiscale retractable transformer for medical image progressive denoising via noise level estimation. Image Vision Comput, 144:104974, 2024","journal-title":"Image Vision Comput"},{"key":"1422_CR25","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Albanie S, Sun G, Wu E: Squeeze-and-excitation networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, Honolulu: USA, 42:2011-2023, 2018","DOI":"10.1109\/TPAMI.2019.2913372"},{"issue":"1","key":"1422_CR26","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1038\/s41597-019-0322-0","volume":"6","author":"AEW Johnson","year":"2019","unstructured":"Johnson AEW, Pollard TJ, Berkowitz SJ, Greenbaum NR, Lungren MP, Deng CY, Mark RG, Horng S: MIMIC-CXR, a de-identified publicly available database of chest radiographs with free-text reports. 
Sci Data, 6(1):317, 2019","journal-title":"Sci Data"},{"issue":"2","key":"1422_CR27","doi-asserted-by":"publisher","first-page":"304","DOI":"10.1093\/jamia\/ocv080","volume":"23","author":"D Demner-Fushman","year":"2016","unstructured":"Demner-Fushman D, Kohli MD, Rosenman MB, Shooshan SE, Rodriguez L, Antani S, Thoma GR, McDonald CJ: Preparing a collection of radiology examinations for distribution and retrieval. J Am Med Inform Assoc, 23(2):304-10, 2016","journal-title":"J Am Med Inform Assoc"},{"key":"1422_CR28","doi-asserted-by":"crossref","unstructured":"Liu F, Wu X, Ge S, Fan W, Zou Y: Exploring and distilling posterior and prior knowledge for radiology report generation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, Nashville: USA, 13753\u201313762, 2021","DOI":"10.1109\/CVPR46437.2021.01354"},{"key":"1422_CR29","doi-asserted-by":"publisher","first-page":"904","DOI":"10.1109\/TMM.2023.3273390","volume":"26","author":"K Zhang","year":"2024","unstructured":"Zhang K, Jiang H, Zhang J, Huang Q, Fan J, Yu J, Han W: Semi-Supervised Medical Report Generation via Graph-Guided Hybrid Feature Consistency. IEEE T Multimedia, 26:904-915, 2024","journal-title":"IEEE T Multimedia"},{"key":"1422_CR30","doi-asserted-by":"crossref","unstructured":"Cao Y, Cui L, Yu F, Zhang L, Li Z, Liu N, Xu Y: Kdtnet: medical image report generation via knowledge-driven transformer. In International Conference on Database Systems for Advanced Applications, Gifu: Japan, 117\u2013132, 2022","DOI":"10.1007\/978-3-031-00129-1_8"},{"key":"1422_CR31","doi-asserted-by":"publisher","first-page":"101817","DOI":"10.1016\/j.inffus.2023.101817","volume":"97","author":"D Xu","year":"2023","unstructured":"Xu D, Zhu H, Huang Y, Jin Z, Ding W, Li H, Ran M: Vision-knowledge fusion model for multi-domain medical report generation. 
Inform Fusion, 97:101817, 2023","journal-title":"Inform Fusion"},{"issue":"10","key":"1422_CR32","doi-asserted-by":"publisher","first-page":"2803","DOI":"10.1109\/TMI.2022.3171661","volume":"41","author":"Z Wang","year":"2022","unstructured":"Wang Z, Han H, Wang L, Li X, Zhou L: Automated Radiographic Report Generation Purely on Transformer: A Multicriteria Supervised Approach. IEEE T Med Imaging, 41(10):2803-2813, 2022","journal-title":"IEEE T Med Imaging"},{"key":"1422_CR33","doi-asserted-by":"crossref","unstructured":"Cao Y, Cui L, Zhang L, Yu F, Li Z, Xu Y: MMTN: multi-modal memory transformer network for image-report consistent medical report generation. In Proceedings of the AAAI Conference on Artificial Intelligence, Washington: United states, 37:277-285, 2023","DOI":"10.1609\/aaai.v37i1.25100"},{"key":"1422_CR34","doi-asserted-by":"crossref","unstructured":"You D, Liu F, Ge S, Xie X, Zhang J, Wu X: Aligntransformer: Hierarchical alignment of visual regions and disease tags for medical report generation. In Medical Image Computing and Computer Assisted Intervention\u2013MICCAI 2021: 24th International Conference, Strasbourg: France, 72\u201382, 2021","DOI":"10.1007\/978-3-030-87199-4_7"},{"issue":"11","key":"1422_CR35","doi-asserted-by":"publisher","first-page":"5631","DOI":"10.1109\/JBHI.2022.3197162","volume":"26","author":"B Yan","year":"2022","unstructured":"Yan B, Pei M, Zhao M, Shan C, Tian Z: Prior Guided Transformer for Accurate Radiology Reports Generation. 
IEEE J Biomed Health Inform, 26(11):5631-5640, 2022","journal-title":"IEEE J Biomed Health Inform"}],"container-title":["Journal of Imaging Informatics in Medicine"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10278-025-01422-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10278-025-01422-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10278-025-01422-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,29]],"date-time":"2025-10-29T22:48:26Z","timestamp":1761778106000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10278-025-01422-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,31]]},"references-count":35,"journal-issue":{"issue":"5","published-online":{"date-parts":[[2025,10]]}},"alternative-id":["1422"],"URL":"https:\/\/doi.org\/10.1007\/s10278-025-01422-9","relation":{},"ISSN":["2948-2933"],"issn-type":[{"type":"electronic","value":"2948-2933"}],"subject":[],"published":{"date-parts":[[2025,1,31]]},"assertion":[{"value":"18 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 December 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 January 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 January 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We proposed an innovative deep-learning method to generate corresponding medical reports based on medical chest images. The medical chest images and chest medical reports in this study are from the publicly accessible MIMIC-CXR and IU-Xray datasets. We have obtained the necessary permissions to use these data through a license agreement.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics Approval and Consent to Participate"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}]}}