{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T21:36:21Z","timestamp":1770845781873,"version":"3.50.1"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T00:00:00Z","timestamp":1759622400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T00:00:00Z","timestamp":1759622400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,5]]},"DOI":"10.1109\/smc58881.2025.11342600","type":"proceedings-article","created":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T20:54:44Z","timestamp":1769633684000},"page":"2059-2065","source":"Crossref","is-referenced-by-count":0,"title":["KEREM: Enhancing Reliability and Transparency in Medical QA through LLM and Knowledge Graph Fusion"],"prefix":"10.1109","author":[{"given":"Shaojie","family":"Dong","sequence":"first","affiliation":[{"name":"Qilu University of Technology (Shandong Academy of Sciences),Faculty of Computer Science and Technology,Jinan,China"}]},{"given":"Zhe","family":"Zhu","sequence":"additional","affiliation":[{"name":"Qilu University of Technology (Shandong Academy of Sciences),Shandong Artificial Intelligence Institute,Jinan,China"}]},{"given":"Pengyao","family":"Xu","sequence":"additional","affiliation":[{"name":"Qilu University of Technology (Shandong Academy of Sciences),Shandong Artificial Intelligence Institute,Jinan,China"}]},{"given":"Ke","family":"Shan","sequence":"additional","affiliation":[{"name":"Qilu University of Technology (Shandong Academy of Sciences),Shandong Artificial Intelligence Institute,Jinan,China"}]},{"given":"Shuwang","family":"Zhou","sequence":"additional","affiliation":[{"name":"Qilu University of Technology (Shandong Academy of Sciences),Shandong Artificial Intelligence Institute,Jinan,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1186\/s13054-023-04393-x"},{"key":"ref2","article-title":"A comprehensive survey of hallucination mitigation techniques in large language models","volume":"6","author":"Tonmoy","year":"2024"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.354"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.99"},{"key":"ref5","article-title":"ROSA: Accurate parameter-efficient fine-tuning via robust adaptation","author":"Nikdan","year":"2024"},{"key":"ref6","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume":"35","author":"Kojima","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s41019-025-00335-5"},{"key":"ref8","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.emnlp-main.567"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.ijcnlp-demo.4"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.45"},{"key":"ref12","article-title":"Efficient long-decoding inference with reasoning-aware attention sparsity","author":"Hu","year":"2025"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1282"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.432"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-142"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29919"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.519"},{"key":"ref18","article-title":"Relational graph attention networks","author":"Busbridge","year":"2019"},{"key":"ref19","article-title":"Infinite-LLM: Efficient LLM service for long context with distattention and distributed KVCache","author":"Lin","year":"2024"},{"key":"ref20","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref21","article-title":"Injecting new knowledge into large language models via supervised fine-tuning","author":"Mecklenburg","year":"2024"}],"event":{"name":"2025 IEEE International Conference on Systems, Man, and Cybernetics (SMC)","location":"Vienna, Austria","start":{"date-parts":[[2025,10,5]]},"end":{"date-parts":[[2025,10,8]]}},"container-title":["2025 IEEE International Conference on Systems, Man, and Cybernetics (SMC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11342430\/11342431\/11342600.pdf?arnumber=11342600","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T20:51:39Z","timestamp":1770843099000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11342600\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,5]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/smc58881.2025.11342600","relation":{},"subject":[],"published":{"date-parts":[[2025,10,5]]}}}