{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,8]],"date-time":"2026-03-08T01:59:43Z","timestamp":1772935183847,"version":"3.50.1"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,8]]},"DOI":"10.1109\/bigdata66926.2025.11401062","type":"proceedings-article","created":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T20:57:57Z","timestamp":1772830677000},"page":"672-678","source":"Crossref","is-referenced-by-count":0,"title":["A Complete Evaluation Method for Factual Knowledge Base on AMR"],"prefix":"10.1109","author":[{"given":"Peng","family":"Hu","sequence":"first","affiliation":[{"name":"University of Tianjin Normal,Tianjing,China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9472-9884","authenticated-orcid":false,"given":"ZhengTao","family":"Li","sequence":"additional","affiliation":[{"name":"University of Tianjin Normal,Tianjing,China"}]},{"given":"LuLu","family":"Li","sequence":"additional","affiliation":[{"name":"University of Tianjin Normal,Tianjing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3649506"},{"key":"ref2","volume-title":"Siren\u2019s Song in the AI Ocean: A Survey on Hallucination in Large Language Models","author":"Zhang","year":"2023"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.546"},{"key":"ref4","volume-title":"COPEN: Probing Conceptual Knowledge in Pre-trained Language Models","author":"Peng","year":"2022"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2024.eacl-long.4","volume-title":"Generating Benchmarks for Factuality Evaluation of Language Models","author":"Muhlgay","year":"2024"},{"key":"ref6","volume-title":"Large Linguistic Models: Investigating LLMs\u2019 metalinguistic abilities","author":"Begu\u0161","year":"2025"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3641289"},{"key":"ref8","first-page":"178","article-title":"Abstract Meaning Representation for Sembanking","volume-title":"Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse","author":"Banarescu","year":"2013"},{"key":"ref9","volume-title":"FActScore: Fine-grained Atomic Evaluation of Factual Precision in Long Form Text Generation","author":"Min","year":"2023"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d18-1424"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.cogsys.2023.101188"},{"key":"ref12","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2022.findings-acl.168","volume-title":"Question Generation for Reading Comprehension Assessment by Modeling How and What to Ask","author":"Ghanem","year":"2022"},{"key":"ref13","volume-title":"Can LLMs Ask Good Questions?","author":"Zhang","year":"2025"},{"key":"ref14","volume-title":"The Llama 3 Herd of Models","author":"Grattafiori","year":"2024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i14.17489"},{"key":"ref17","volume-title":"GPT-4 Technical Report","author":"Achiam","year":"2024"},{"key":"ref18","volume-title":"GLM: General Language Model Pretraining with Autoregressive Blank Infilling","author":"Du","year":"2022"},{"key":"ref19","volume-title":"Yi: Open Foundation Models by 01.AI","author":"Young","year":"2025"},{"key":"ref20","volume-title":"DeepSeek LLM: Scaling Open-Source Language Models with Longtermism","author":"Bi","year":"2024"},{"key":"ref21","volume-title":"Llama 2: Open Foundation and Fine-Tuned Chat Models","author":"Touvron","year":"2023"},{"key":"ref22","volume-title":"DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning","author":"Guo","year":"2025"}],"event":{"name":"2025 IEEE International Conference on Big Data (BigData)","location":"Macau, China","start":{"date-parts":[[2025,12,8]]},"end":{"date-parts":[[2025,12,11]]}},"container-title":["2025 IEEE International Conference on Big Data (BigData)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11400704\/11400712\/11401062.pdf?arnumber=11401062","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T07:24:44Z","timestamp":1772868284000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11401062\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,8]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/bigdata66926.2025.11401062","relation":{},"subject":[],"published":{"date-parts":[[2025,12,8]]}}}