{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:00:31Z","timestamp":1772906431648,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,15]],"date-time":"2024-12-15T00:00:00Z","timestamp":1734220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,15]],"date-time":"2024-12-15T00:00:00Z","timestamp":1734220800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,15]]},"DOI":"10.1109\/bigdata62323.2024.10826092","type":"proceedings-article","created":{"date-parts":[[2025,1,16]],"date-time":"2025-01-16T18:31:23Z","timestamp":1737052283000},"page":"3965-3974","source":"Crossref","is-referenced-by-count":3,"title":["Large Language Models in Data Governance: Multi-source Data Tables Merging"],"prefix":"10.1109","author":[{"given":"Linfeng","family":"Li","sequence":"first","affiliation":[{"name":"University of Electronic Science and Technology of China,School of SCSE,Chengdu,China"}]},{"given":"Hong","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,School of SCSE,Chengdu,China"}]},{"given":"Zhijie","family":"Qiu","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,School of SCSE,Chengdu,China"}]},{"given":"Lei","family":"Luo","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,School of SCSE,Chengdu,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Palm 2 technical report","author":"Anil","year":"2023"},{"key":"ref2","article-title":"Language models are few-shot learners","author":"Brown","year":"2020"},{"issue":"240","key":"ref3","first-page":"1","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"Journal of Machine Learning Research"},{"key":"ref4","article-title":"A survey of large language models","author":"Zhao","year":"2023"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.14778\/3611479.3611527"},{"key":"ref6","article-title":"Column type annotation using chatgpt","author":"Korini","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.14778\/3574245.3574258"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-42941-5_20"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref10","article-title":"Lawgpt: A chinese legal knowledge-enhanced large language model","author":"Zhou","year":"2024"},{"key":"ref11","article-title":"Baichuan 2: Open large-scale language models","author":"Yang","year":"2023"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.2139\/ssrn.4489826"},{"key":"ref13","article-title":"Doctorglm: Fine-tuning your chinese doctor is not a herculean task","author":"Xiong","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.725"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.83"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.308"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.362"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3654979"},{"key":"ref19","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref20","first-page":"2","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of naacL-HLT","volume":"1","author":"Kenton"},{"key":"ref21","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref22","article-title":"Chatglm: A family of large language models from glm-130b to glm-4 all tools","author":"GLM","year":"2024"},{"key":"ref23","first-page":"2790","article-title":"Parameter-efficient transfer learning for nlp","volume-title":"International conference on machine learning","author":"Houlsby"},{"key":"ref24","article-title":"Lora: Low-rank adaptation of large language models","author":"Hu","year":"2021"},{"key":"ref25","article-title":"P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks","author":"Liu","year":"2021"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3542700.3542709"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.14778\/3421424.3421431"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.745"},{"key":"ref29","first-page":"3982","article-title":"Sentence-BERT: Sentence embeddings using Siamese BERT-networks","volume-title":"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)","author":"Reimers"}],"event":{"name":"2024 IEEE International Conference on Big Data (BigData)","location":"Washington, DC, USA","start":{"date-parts":[[2024,12,15]]},"end":{"date-parts":[[2024,12,18]]}},"container-title":["2024 IEEE International Conference on Big Data (BigData)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10824975\/10824942\/10826092.pdf?arnumber=10826092","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,17]],"date-time":"2025-01-17T08:16:02Z","timestamp":1737101762000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10826092\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,15]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/bigdata62323.2024.10826092","relation":{},"subject":[],"published":{"date-parts":[[2024,12,15]]}}}