{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,3]],"date-time":"2026-01-03T15:20:18Z","timestamp":1767453618608},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T00:00:00Z","timestamp":1715126400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T00:00:00Z","timestamp":1715126400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,8]]},"DOI":"10.1109\/cscwd61410.2024.10580146","type":"proceedings-article","created":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T17:21:49Z","timestamp":1720632109000},"page":"3086-3091","source":"Crossref","is-referenced-by-count":3,"title":["Are Large Language Models Table-based Fact-Checkers?"],"prefix":"10.1109","author":[{"given":"Hanwen","family":"Zhang","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,Beijing,China"}]},{"given":"Qingyi","family":"Si","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,Beijing,China"}]},{"given":"Peng","family":"Fu","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,Beijing,China"}]},{"given":"Zheng","family":"Lin","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,Beijing,China"}]},{"given":"Weiping","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n18-1074"},{"key":"ref2","article-title":"Tabfact: A large-scale dataset for table-based fact verification","author":"Chen","year":"2020","journal-title":"ICLR"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.210"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.539"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.628"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.16"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.398"},{"key":"ref8","article-title":"TAPEX: table pre-training via learning a neural SQL executor","author":"Liu","year":"2022","journal-title":"ICLR"},{"key":"ref9","article-title":"Table-former: Robust transformer modeling for table-text encoding","volume-title":"ACL","author":"Yang","year":"2022"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.13"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.331"},{"key":"ref12","article-title":"Language models are few-shot learners","volume-title":"NeurIPS","author":"Brown","year":"2020"},{"key":"ref13","article-title":"Evaluating large language models trained on code","author":"Chen","year":"2021","journal-title":"CoRR"},{"key":"ref14","article-title":"How would stance detection techniques evolve after the launch of chatgpt?","author":"Zhang","year":"2022","journal-title":"CoRR"},{"key":"ref15","article-title":"Cross-lingual summarization via chatgpt","author":"Wang","year":"2023","journal-title":"CoRR"},{"key":"ref16","article-title":"How good are GPT models at machine translation? A comprehensive evaluation","author":"Hendy","year":"2023","journal-title":"CoRR"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref17"},{"article-title":"Stanford alpaca: An instruction-following llama model","year":"2023","author":"Taori","key":"ref18"},{"key":"ref19","article-title":"Training language models to follow instructions with human feedback","author":"Ouyang","year":"2022","journal-title":"NeurIPS"},{"key":"ref20","article-title":"Binding language models in symbolic languages","author":"Cheng","year":"2022","journal-title":"CoRR"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591708"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.269"},{"article-title":"Lora: Low-rank adaptation of large language models","year":"2021","author":"Hu","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2023.08.012"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref25"},{"key":"ref26","article-title":"Is chatgpt A good translator? A preliminary study","author":"Jiao","year":"2023","journal-title":"CoRR"},{"key":"ref27","article-title":"Chain-of-thought prompting elicits reasoning in large language models","author":"Wei","year":"2022","journal-title":"NeurIPS"}],"event":{"name":"2024 27th International Conference on Computer Supported Cooperative Work in Design (CSCWD)","start":{"date-parts":[[2024,5,8]]},"location":"Tianjin, China","end":{"date-parts":[[2024,5,10]]}},"container-title":["2024 27th International Conference on Computer Supported Cooperative Work in Design (CSCWD)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10579968\/10579982\/10580146.pdf?arnumber=10580146","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,17]],"date-time":"2024-07-17T04:52:12Z","timestamp":1721191932000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10580146\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,8]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/cscwd61410.2024.10580146","relation":{},"subject":[],"published":{"date-parts":[[2024,5,8]]}}}