{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,12]],"date-time":"2025-07-12T06:10:01Z","timestamp":1752300601244,"version":"3.41.2"},"reference-count":19,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,11,5]],"date-time":"2024-11-05T00:00:00Z","timestamp":1730764800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,5]],"date-time":"2024-11-05T00:00:00Z","timestamp":1730764800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,11,5]]},"DOI":"10.1109\/kse63888.2024.11063515","type":"proceedings-article","created":{"date-parts":[[2025,7,11]],"date-time":"2025-07-11T17:41:44Z","timestamp":1752255704000},"page":"198-204","source":"Crossref","is-referenced-by-count":0,"title":["Investigating Recent Large Language Models for Vietnamese Machine Reading Comprehension"],"prefix":"10.1109","author":[{"given":"Anh Duc","family":"Nguyen","sequence":"first","affiliation":[{"name":"University of Engineering and Technology - Vietnam National University"}]},{"given":"Hieu Minh","family":"Phi","sequence":"additional","affiliation":[{"name":"University of Engineering and Technology - Vietnam National University"}]},{"given":"Anh Viet","family":"Ngo","sequence":"additional","affiliation":[{"name":"University of Engineering and Technology - Vietnam National University"}]},{"given":"Long Hai","family":"Trieu","sequence":"additional","affiliation":[{"name":"University of Engineering and Technology - Vietnam National University"}]},{"given":"Thai Phuong","family":"Nguyen","sequence":"additional","affiliation":[{"name":"University of Engineering and Technology - Vietnam National University"}]}],"member":"263","reference":[{"key":"ref1","first-page":"8784","article-title":"English machine reading comprehension datasets: A survey","volume-title":"Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing","author":"Dzendzik"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3138683"},{"key":"ref3","article-title":"A survey of large language models","volume":"abs\/2303.18223","author":"Zhao","year":"2023","journal-title":"ArXiv"},{"key":"ref4","article-title":"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt","volume":"abs\/2302.09419","author":"Zhou","year":"2023","journal-title":"ArXiv"},{"key":"ref5","article-title":"On the opportunities and risks of foundation models","author":"Bommasani","year":"2021","journal-title":"ArXiv"},{"key":"ref6","article-title":"Do generative large language models need billions of parameters?","volume":"abs\/2309.06589","author":"Gholami","year":"2023","journal-title":"ArXiv"},{"key":"ref7","article-title":"A pilot study on multiple choice machine reading comprehension for vietnamese texts","volume":"abs\/2001.05687","author":"Nguyen","year":"2020","journal-title":"ArXiv"},{"volume-title":"Llama 3 model card","year":"2024","key":"ref8"},{"key":"ref9","article-title":"Gemma: Open models based on gemini research and technology","author":"Team","year":"2024","journal-title":"arXiv preprint arXiv"},{"volume-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref10"},{"volume-title":"Qlora: Efficient finetuning of quantized llms","year":"2023","author":"Dettmers","key":"ref11"},{"volume-title":"Unsloth AI","year":"2024","author":"Han","key":"ref12"},{"volume-title":"A multiple choices reading comprehension corpus for vietnamese language education","year":"2023","author":"Luu","key":"ref13"},{"key":"ref14","article-title":"Improving sequence tagging for vietnamese text using transformer-based neural models","volume-title":"Pacific Asia Conference on Language, Information and Computation","author":"The","year":"2020"},{"key":"ref15","article-title":"Mmm: Multi-stage multi-task learning for multi-choice reading comprehension","volume-title":"AAAI Conference on Artificial Intelligence","author":"Jin","year":"2019"},{"volume-title":"Language models are few-shot learners","year":"2020","author":"Brown","key":"ref16"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2023.acl-long.891","volume-title":"Crosslingual generalization through multitask finetuning","author":"Muennighoff","year":"2023"},{"key":"ref18","doi-asserted-by":"crossref","DOI":"10.1145\/3628797.3628837","volume-title":"Evaluating the symbol binding ability of large language models for multiple-choice questions in vietnamese general education","author":"Nguyen","year":"2023"},{"volume-title":"Chain-of-thought prompting elicits reasoning in large language models","year":"2022","author":"Wei","key":"ref19"}],"event":{"name":"2024 16th International Conference on Knowledge and System Engineering (KSE)","start":{"date-parts":[[2024,11,5]]},"location":"Kuala Lumpur, Malaysia","end":{"date-parts":[[2024,11,7]]}},"container-title":["2024 16th International Conference on Knowledge and System Engineering (KSE)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11063473\/11063476\/11063515.pdf?arnumber=11063515","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,12]],"date-time":"2025-07-12T05:36:10Z","timestamp":1752298570000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11063515\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,5]]},"references-count":19,"URL":"https:\/\/doi.org\/10.1109\/kse63888.2024.11063515","relation":{},"subject":[],"published":{"date-parts":[[2024,11,5]]}}}