{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T07:00:53Z","timestamp":1771743653929,"version":"3.50.1"},"publisher-location":"Cham","reference-count":47,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032049704","type":"print"},{"value":"9783032049711","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,20]],"date-time":"2025-09-20T00:00:00Z","timestamp":1758326400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04971-1_52","type":"book-chapter","created":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T17:09:43Z","timestamp":1758301783000},"page":"552-562","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["RRG-DPO: Direct Preference Optimization for\u00a0Clinically Accurate Radiology Report Generation"],"prefix":"10.1007","author":[{"given":"Hong","family":"Liu","sequence":"first","affiliation":[]},{"given":"Dong","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Zhe","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xian","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Yefeng","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Liansheng","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,20]]},"reference":[{"key":"52_CR1","unstructured":"Bai, Y., et\u00a0al.: Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862 (2022)"},{"key":"52_CR2","unstructured":"Banerjee, O., Zhou, H.Y., Wu, K., Adithan, S., Kwak, S., Rajpurkar, P.: Direct preference optimization for suppressing hallucinated prior exams in radiology report generation. In: Machine Learning for Healthcare Conference. PMLR (2024)"},{"key":"52_CR3","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372 (2005)"},{"key":"52_CR4","unstructured":"Bannur, S., et\u00a0al.: MAIRA-2: grounded radiology report generation. arXiv preprint arXiv:2406.04449 (2024)"},{"key":"52_CR5","doi-asserted-by":"crossref","unstructured":"Boecking, B., et\u00a0al.: Making the most of text semantics to improve biomedical vision\u2013language processing. In: ECCV, pp. 1\u201321. Springer (2022)","DOI":"10.1007\/978-3-031-20059-5_1"},{"issue":"1","key":"52_CR6","first-page":"3","volume":"81","author":"A Brady","year":"2012","unstructured":"Brady, A., et al.: Discrepancy and error in radiology: concepts, causes and consequences. Ulst. Med. J. 81(1), 3 (2012)","journal-title":"Ulst. Med. J."},{"key":"52_CR7","doi-asserted-by":"crossref","unstructured":"Bu, S., et\u00a0al.: Instance-level expert knowledge and aggregate discriminative attention for radiology report generation. 
In: CVPR, pp. 14194\u201314204 (2024)","DOI":"10.1109\/CVPR52733.2024.01346"},{"key":"52_CR8","unstructured":"Chaves, J.M.Z., et\u00a0al.: Towards a clinically accessible radiology foundation model: open-access and lightweight, with automated evaluation. arXiv preprint arXiv:2403.08002 (2024)"},{"key":"52_CR9","unstructured":"Chen, H., et\u00a0al.: 3D-CT-GPT: generating 3D radiology reports through integration of large vision-language models. arXiv preprint arXiv:2409.19330 (2024)"},{"key":"52_CR10","doi-asserted-by":"crossref","unstructured":"Chen, Z., et\u00a0al.: Cross-modal memory networks for radiology report generation. In: ACL-IJCNLP (Volume 1: Long Papers), pp. 5904\u20135914 (2021)","DOI":"10.18653\/v1\/2021.acl-long.459"},{"key":"52_CR11","doi-asserted-by":"crossref","unstructured":"Chen, Z., Song, Y., Chang, T.H., Wan, X.: Generating radiology reports via memory-driven transformer. In: EMNLP, pp. 1439\u20131449 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.112"},{"key":"52_CR12","doi-asserted-by":"crossref","unstructured":"Donahue, J., et\u00a0al.: Long-term recurrent convolutional networks for visual recognition and description. In: CVPR, pp. 2625\u20132634 (2015)","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"52_CR13","unstructured":"Ethem\u00a0Hamamci, I., et\u00a0al.: A foundation model utilizing chest CT volumes and radiology reports for supervised-level zero-shot detection of abnormalities. arXiv e-prints, pp. arXiv\u20132403 (2024)"},{"key":"52_CR14","doi-asserted-by":"crossref","unstructured":"Hamamci, I.E., Er, S., Menze, B.: CT2Rep: automated radiology report generation for 3D medical imaging. In: MICCAI, pp. 476\u2013486. Springer (2024)","DOI":"10.1007\/978-3-031-72390-2_45"},{"key":"52_CR15","unstructured":"Hein, D., et\u00a0al.: Preference fine-tuning for factuality in chest X-ray interpretation models without human feedback. arXiv preprint arXiv:2410.07025 (2024)"},{"key":"52_CR16","doi-asserted-by":"crossref","unstructured":"Huang, Z., Zhang, X., Zhang, S.: KiUT: knowledge-injected U-Transformer for radiology report generation. In: CVPR, pp. 19809\u201319818 (2023)","DOI":"10.1109\/CVPR52729.2023.01897"},{"key":"52_CR17","unstructured":"Jain, S., et\u00a0al.: RadGraph: extracting clinical entities and relations from radiology reports. arXiv preprint arXiv:2106.14463 (2021)"},{"key":"52_CR18","doi-asserted-by":"crossref","unstructured":"Jin, H., Che, H., Lin, Y., Chen, H.: PromptMRG: diagnosis-driven prompts for medical report generation. In: AAAI, vol.\u00a038, pp. 2607\u20132615 (2024)","DOI":"10.1609\/aaai.v38i3.28038"},{"issue":"1","key":"52_CR19","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1038\/s41597-019-0322-0","volume":"6","author":"AE Johnson","year":"2019","unstructured":"Johnson, A.E., et al.: MIMIC-CXR, a de-identified publicly available database of chest radiographs with free-text reports. Sci. Data 6(1), 317 (2019)","journal-title":"Sci. Data"},{"key":"52_CR20","doi-asserted-by":"crossref","unstructured":"Kale, K., et\u00a0al.: \u201cKnowledge is power\u201d: constructing knowledge graph of abdominal organs and using them for automatic radiology report generation. In: ACL, pp. 11\u201324 (2023)","DOI":"10.18653\/v1\/2023.acl-industry.2"},{"key":"52_CR21","doi-asserted-by":"crossref","unstructured":"Lee, S., Youn, J., Kim, H., et\u00a0al.: CXR-LLAVA: a multimodal large language model for interpreting chest X-ray images. Eur. Radiol. 
1\u201313 (2025)","DOI":"10.1007\/s00330-024-11339-6"},{"key":"52_CR22","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381 (2004)"},{"key":"52_CR23","doi-asserted-by":"crossref","unstructured":"Liu, C., Tian, Y., Chen, W., Song, Y., Zhang, Y.: Bootstrapping large language models for radiology report generation. In: AAAI, vol.\u00a038, pp. 18635\u201318643 (2024)","DOI":"10.1609\/aaai.v38i17.29826"},{"key":"52_CR24","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"52_CR25","doi-asserted-by":"crossref","unstructured":"Lu, J., Xiong, C., Parikh, D., Socher, R.: Knowing when to look: adaptive attention via a visual sentinel for image captioning. In: CVPR, pp. 375\u2013383 (2017)","DOI":"10.1109\/CVPR.2017.345"},{"key":"52_CR26","doi-asserted-by":"publisher","first-page":"101585","DOI":"10.1016\/j.imu.2024.101585","volume":"50","author":"A Nicolson","year":"2024","unstructured":"Nicolson, A., Dowling, J., Anderson, D., Koopman, B.: Longitudinal data and a semantic similarity reward for chest X-ray report generation. Inform. Med. Unlock. 50, 101585 (2024)","journal-title":"Inform. Med. Unlock."},{"key":"52_CR27","doi-asserted-by":"publisher","first-page":"102633","DOI":"10.1016\/j.artmed.2023.102633","volume":"144","author":"A Nicolson","year":"2023","unstructured":"Nicolson, A., Dowling, J., Koopman, B.: Improving chest X-ray report generation by leveraging warm starting. Artif. Intell. Med. 144, 102633 (2023)","journal-title":"Artif. Intell. Med."},{"key":"52_CR28","doi-asserted-by":"crossref","unstructured":"Ouali, Y., Bulat, A., Martinez, B., Tzimiropoulos, G.: CLIP-DPO: vision-language models as a source of preference for fixing hallucinations in LVLMs. In: ECCV, pp. 395\u2013413. Springer (2025)","DOI":"10.1007\/978-3-031-73116-7_23"},{"key":"52_CR29","unstructured":"Pal, A., et\u00a0al.: Smaug: fixing failure modes of preference optimisation with DPO-positive. arXiv preprint arXiv:2402.13228 (2024)"},{"key":"52_CR30","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: ACL, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"52_CR31","unstructured":"Paszke, A., et\u00a0al.: Pytorch: an imperative style, high-performance deep learning library. In: NeurIPS, pp. 8024\u20138035 (2019)"},{"key":"52_CR32","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763. PMLR (2021)"},{"key":"52_CR33","unstructured":"Rafailov, R., et\u00a0al.: Direct preference optimization: your language model is secretly a reward model. In: NeurIPS, vol. 36 (2024)"},{"key":"52_CR34","doi-asserted-by":"crossref","unstructured":"Shen, H., Pei, M., Liu, J., Tian, Z.: Automatic radiology reports generation via memory alignment network. In: AAAI, vol.\u00a038, pp. 4776\u20134783 (2024)","DOI":"10.1609\/aaai.v38i5.28279"},{"key":"52_CR35","doi-asserted-by":"crossref","unstructured":"Smit, A., et\u00a0al.: Combining automatic labelers and expert annotations for accurate radiology report labeling using BERT. In: EMNLP, pp. 1500\u20131519 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.117"},{"key":"52_CR36","unstructured":"Song, X., Zhang, X., Ji, J., Liu, Y., Wei, P.: Cross-modal contrastive attention model for medical report generation. In: COLING, pp. 
2388\u20132397 (2022)"},{"key":"52_CR37","doi-asserted-by":"crossref","unstructured":"Sun, G., et\u00a0al.: STLLaVA-med: self-training large language and vision assistant for medical question-answering. arXiv preprint arXiv:2406.19973 (2024)","DOI":"10.18653\/v1\/2024.emnlp-main.1119"},{"key":"52_CR38","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: CVPR, pp. 3156\u20133164 (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"52_CR39","doi-asserted-by":"crossref","unstructured":"Wang, J., Bhalerao, A., He, Y.: Cross-modal prototype driven network for radiology report generation. In: ECCV, pp. 563\u2013579. Springer (2022)","DOI":"10.1007\/978-3-031-19833-5_33"},{"issue":"3","key":"52_CR40","doi-asserted-by":"publisher","first-page":"100033","DOI":"10.1016\/j.metrad.2023.100033","volume":"1","author":"Z Wang","year":"2023","unstructured":"Wang, Z., Liu, L., Wang, L., Zhou, L.: R2GenGPT: radiology report generation with frozen LLMs. Meta-Radiol. 1(3), 100033 (2023)","journal-title":"Meta-Radiol."},{"key":"52_CR41","unstructured":"Xiao, T., Shi, L., Liu, P., Wang, Z., Bai, C.: Radiology report generation via multi-objective preference optimization. arXiv preprint arXiv:2412.08901 (2024)"},{"key":"52_CR42","first-page":"102798","volume":"86","author":"S Yang","year":"2023","unstructured":"Yang, S., Wu, X., Ge, S., et al.: Radiology report generation with a learned knowledge base and multi-modal alignment. Media 86, 102798 (2023)","journal-title":"Media"},{"key":"52_CR43","first-page":"102510","volume":"80","author":"S Yang","year":"2022","unstructured":"Yang, S., Wu, X., Ge, S., Zhou, S.K., Xiao, L.: Knowledge matters: chest radiology report generation with general and specific knowledge. Media 80, 102510 (2022)","journal-title":"Media"},{"key":"52_CR44","doi-asserted-by":"crossref","unstructured":"You, D., Liu, F., Ge, S., Xie, X., Zhang, J., Wu, X.: AlignTransformer: hierarchical alignment of visual regions and disease tags for medical report generation. In: MICCAI, pp. 72\u201382. Springer (2021)","DOI":"10.1007\/978-3-030-87199-4_7"},{"issue":"9","key":"52_CR45","doi-asserted-by":"publisher","first-page":"100802","DOI":"10.1016\/j.patter.2023.100802","volume":"4","author":"F Yu","year":"2023","unstructured":"Yu, F., et al.: Evaluating progress in automatic chest X-ray radiology report generation. Patterns 4(9), 100802 (2023)","journal-title":"Patterns"},{"key":"52_CR46","doi-asserted-by":"crossref","unstructured":"Zhao, W., Wu, C., Zhang, X., Zhang, Y., Wang, Y., Xie, W.: RaTEScore: a metric for radiology report generation. arXiv preprint arXiv:2406.16845 (2024)","DOI":"10.1101\/2024.06.24.24309405"},{"key":"52_CR47","unstructured":"Zhu, K., Xia, P., Li, Y., Zhu, H., Wang, S., Yao, H.: MMedPO: aligning medical vision-language models with clinical-aware multimodal preference optimization. 
arXiv preprint arXiv:2412.06141 (2024)"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04971-1_52","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T06:45:30Z","timestamp":1771742730000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04971-1_52"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,20]]},"ISBN":["9783032049704","9783032049711"],"references-count":47,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04971-1_52","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,20]]},"assertion":[{"value":"20 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
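
The record above is a Crossref works-API response for DOI 10.1007/978-3-032-04971-1_52 (the RRG-DPO chapter in the MICCAI 2025 proceedings). As a minimal sketch, assuming the public Crossref REST API endpoint https://api.crossref.org/works/{doi} and the Python requests package, a record like this could be fetched and a few of the fields shown above read out; all field names used below are taken directly from the JSON itself, and nothing beyond those fields is assumed.

```python
# Minimal sketch: fetch a Crossref work record and read fields that appear
# in the JSON above. Assumes the public Crossref REST API
# (https://api.crossref.org/works/{doi}) and the `requests` package.
import requests

DOI = "10.1007/978-3-032-04971-1_52"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
msg = resp.json()["message"]  # the "message" object shown above

# Fields present in the record above: title, container-title, author,
# references-count, DOI (title and container-title are lists of strings).
title = msg["title"][0]
container = msg.get("container-title", [])
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip()
           for a in msg.get("author", [])]
n_refs = msg.get("references-count")

print(title)
print("; ".join(container))
print(", ".join(authors))
print(f"References: {n_refs}, DOI: {msg['DOI']}")
```

The 47-entry "reference" array shown above can be walked the same way via msg.get("reference", []), where each entry carries a "key" and, when deposited, an "unstructured" citation string and a "DOI".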