{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T16:46:44Z","timestamp":1755794804889,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":62,"publisher":"ACM","funder":[{"name":"National Key R&D Program of China","award":["2022ZD0160501"],"award-info":[{"award-number":["2022ZD0160501"]}]},{"name":"Natural Science Foundation of China","award":["62372390","62432011"],"award-info":[{"award-number":["62372390","62432011"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3737405","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T20:52:41Z","timestamp":1754254361000},"page":"5960-5971","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Revolutionizing Database Q&A with Large Language Models: Comprehensive Benchmark and Evaluation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-9277-3661","authenticated-orcid":false,"given":"Yihang","family":"Zheng","sequence":"first","affiliation":[{"name":"Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2279-1491","authenticated-orcid":false,"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9172-1628","authenticated-orcid":false,"given":"Zhenghao","family":"Lin","sequence":"additional","affiliation":[{"name":"Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4279-4109","authenticated-orcid":false,"given":"Yi","family":"Luo","sequence":"additional","affiliation":[{"name":"Xiamen University, Xiamen, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2285-7836","authenticated-orcid":false,"given":"Xuanhe","family":"Zhou","sequence":"additional","affiliation":[{"name":"Shanghai Jiaotong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2275-997X","authenticated-orcid":false,"given":"Chen","family":"Lin","sequence":"additional","affiliation":[{"name":"Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1398-0621","authenticated-orcid":false,"given":"Guoliang","family":"Li","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5606-7122","authenticated-orcid":false,"given":"Jinsong","family":"Su","sequence":"additional","affiliation":[{"name":"Xiamen University, Xiamen, China"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"Rohan Anil Andrew M Dai Orhan Firat Melvin Johnson Dmitry Lepikhin Alexandre Passos Siamak Shakeri Emanuel Taropa Paige Bailey Zhifeng Chen et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403 (2023)."},{"key":"e_1_3_2_2_2_1","volume-title":"Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511","author":"Asai Akari","year":"2023","unstructured":"Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. arXiv preprint arXiv:2310.11511 (2023)."},{"key":"e_1_3_2_2_3_1","unstructured":"Jinze Bai Shuai Bai Yunfei Chu Zeyu Cui Kai Dang Xiaodong Deng Yang Fan Wenbin Ge Yu Han Fei Huang et al. 2023a. Qwen technical report. arXiv preprint arXiv:2309.16609 (2023)."},{"key":"e_1_3_2_2_4_1","volume-title":"Multitask Benchmark for Long Context Understanding. 
arXiv preprint arXiv:2308.14508","author":"Bai Yushi","year":"2023","unstructured":"Yushi Bai, Xin Lv, Jiajie Zhang, Hongchang Lyu, Jiankai Tang, Zhidian Huang, Zhengxiao Du, Xiao Liu, Aohan Zeng, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. 2023b. LongBench: A Bilingual, Multitask Benchmark for Long Context Understanding. arXiv preprint arXiv:2308.14508 (2023)."},{"key":"e_1_3_2_2_5_1","volume-title":"Api-blend: A comprehensive corpora for training and benchmarking api llms. arXiv preprint arXiv:2402.15491","author":"Basu Kinjal","year":"2024","unstructured":"Kinjal Basu, Ibrahim Abdelaziz, Subhajit Chaudhury, Soham Dan, Maxwell Crouse, Asim Munawar, Sadhana Kumaravel, Vinod Muthusamy, Pavan Kapanipathi, and Luis A Lastras. 2024. Api-blend: A comprehensive corpora for training and benchmarking api llms. arXiv preprint arXiv:2402.15491 (2024)."},{"key":"e_1_3_2_2_6_1","first-page":"533","volume-title":"Nature","volume":"619","author":"Bi Kaifeng","year":"2023","unstructured":"Kaifeng Bi, Lingxi Xie, Hengheng Zhang, Xin Chen, Xiaotao Gu, and Qi Tian. 2023. Accurate medium-range global weather forecasting with 3D neural networks. Nature, Vol. 619, 7970 (2023), 533-538."},{"key":"e_1_3_2_2_7_1","volume-title":"https:\/\/github.com\/hwchase17\/langchain","author":"Chase Harrison","year":"2022","unstructured":"Harrison Chase. 2022. LangChain. (2022). https:\/\/github.com\/hwchase17\/langchain"},{"key":"e_1_3_2_2_8_1","unstructured":"Zhoujun Cheng Tianbao Xie Peng Shi Chengzu Li Rahul Nadkarni Yushi Hu Caiming Xiong Dragomir Radev Mari Ostendorf Luke Zettlemoyer et al. 2022. Binding language models in symbolic languages. 
arXiv preprint arXiv:2210.02875 (2022)."},{"key":"e_1_3_2_2_9_1","volume-title":"Malik Boudiaf, Dominic Culver, Rui Melo, Caio Corro, Andre FT Martins, Fabrizio Esposito, Vera L\u00facia Raposo, Sofia Morgado, et al.","author":"Colombo Pierre","year":"2024","unstructured":"Pierre Colombo, Telmo Pessoa Pires, Malik Boudiaf, Dominic Culver, Rui Melo, Caio Corro, Andre FT Martins, Fabrizio Esposito, Vera L\u00facia Raposo, Sofia Morgado, et al. 2024. Saullm-7b: A pioneering large language model for law. arXiv preprint arXiv:2403.03883 (2024)."},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.14778\/3611479.3611527"},{"key":"e_1_3_2_2_11_1","volume-title":"Text-to-sql empowered by large language models: A benchmark evaluation. arXiv preprint arXiv:2308.15363","author":"Gao Dawei","year":"2023","unstructured":"Dawei Gao, Haibin Wang, Yaliang Li, Xiuyu Sun, Yichen Qian, Bolin Ding, and Jingren Zhou. 2023a. Text-to-sql empowered by large language models: A benchmark evaluation. arXiv preprint arXiv:2308.15363 (2023)."},{"key":"e_1_3_2_2_12_1","volume-title":"Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997","author":"Gao Yunfan","year":"2023","unstructured":"Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023b. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)."},{"key":"e_1_3_2_2_13_1","first-page":"134721","article-title":"Can llms solve molecule puzzles? a multimodal benchmark for molecular structure elucidation","volume":"37","author":"Guo Kehan","year":"2025","unstructured":"Kehan Guo, Bozhao Nan, Yujun Zhou, Taicheng Guo, Zhichun Guo, Mihir Surve, Zhenwen Liang, Nitesh Chawla, Olaf Wiest, and Xiangliang Zhang. 2025. Can llms solve molecule puzzles? a multimodal benchmark for molecular structure elucidation. Advances in Neural Information Processing Systems, Vol. 
37 (2025), 134721-134746.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_14_1","first-page":"32336","article-title":"Large Language Models' Expert-level Global History Knowledge Benchmark (HiST-LLM)","volume":"37","author":"Hauser Jakob","year":"2025","unstructured":"Jakob Hauser, D\u00e1niel Kondor, Jenny Reddish, Majid Benam, Enrico Cioni, Federica Villa, James Bennett, Daniel Hoyer, Pieter Francois, Peter Turchin, et al. 2025. Large Language Models' Expert-level Global History Knowledge Benchmark (HiST-LLM). Advances in Neural Information Processing Systems, Vol. 37 (2025), 32336-32369.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/2872427.2883037"},{"key":"e_1_3_2_2_16_1","volume-title":"Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300","author":"Hendrycks Dan","year":"2020","unstructured":"Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2020. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)."},{"key":"e_1_3_2_2_17_1","volume-title":"Saku Sugawara, and Akiko Aizawa.","author":"Ho Xanh","year":"2020","unstructured":"Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop QA dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020)."},{"key":"e_1_3_2_2_18_1","volume-title":"BeaverTails: Towards Improved Safety Alignment of LLM via a Human-Preference Dataset. arXiv preprint arXiv:2307.04657","author":"Ji Jiaming","year":"2023","unstructured":"Jiaming Ji, Mickel Liu, Juntao Dai, Xuehai Pan, Chi Zhang, Ce Bian, Chi Zhang, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. 2023. BeaverTails: Towards Improved Safety Alignment of LLM via a Human-Preference Dataset. 
arXiv preprint arXiv:2307.04657 (2023)."},{"key":"e_1_3_2_2_19_1","volume-title":"Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al.","author":"Jiang Albert Q","year":"2023","unstructured":"Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023a. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)."},{"key":"e_1_3_2_2_20_1","volume-title":"Active retrieval augmented generation. arXiv preprint arXiv:2305.06983","author":"Jiang Zhengbao","year":"2023","unstructured":"Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023b. Active retrieval augmented generation. arXiv preprint arXiv:2305.06983 (2023)."},{"key":"e_1_3_2_2_21_1","volume-title":"Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551","author":"Joshi Mandar","year":"2017","unstructured":"Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. 2017. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. arXiv preprint arXiv:1705.03551 (2017)."},{"key":"e_1_3_2_2_22_1","volume-title":"Soyeon Caren Han, and Minseok Song","author":"Lee Jean","year":"2024","unstructured":"Jean Lee, Nicholas Stevens, Soyeon Caren Han, and Minseok Song. 2024. A Survey of Large Language Models in Finance (FinLLMs). arXiv preprint arXiv:2402.02315 (2024)."},{"key":"e_1_3_2_2_23_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Li Jinyang","year":"2024","unstructured":"Jinyang Li, Binyuan Hui, Ge Qu, Jiaxi Yang, Binhua Li, Bowen Li, Bailin Wang, Bowen Qin, Ruiying Geng, Nan Huo, et al. 2024b. Can llm already serve as a database interface? a big bench for large-scale database grounded text-to-sqls. 
Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_2_24_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Li Jinyang","year":"2024","unstructured":"Jinyang Li, Binyuan Hui, Ge Qu, Jiaxi Yang, Binhua Li, Bowen Li, Bailin Wang, Bowen Qin, Ruiying Geng, Nan Huo, et al. 2024c. Can llm already serve as a database interface? a big bench for large-scale database grounded text-to-sqls. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_2_25_1","volume-title":"Pang Wei Koh, and Yulia Tsvetkov","author":"Li Shuyue Stella","year":"2024","unstructured":"Shuyue Stella Li, Vidhisha Balachandran, Shangbin Feng, Jonathan S Ilgen, Emma Pierson, Pang Wei Koh, and Yulia Tsvetkov. 2024a. MediQ: Question-Asking LLMs and a Benchmark for Reliable Interactive Clinical Reasoning. arXiv preprint arXiv:2406.00922 (2024)."},{"key":"e_1_3_2_2_26_1","volume-title":"Query Rewriting via Large Language Models. arXiv preprint arXiv:2403.09060","author":"Liu Jie","year":"2024","unstructured":"Jie Liu and Barzan Mozafari. 2024. Query Rewriting via Large Language Models. arXiv preprint arXiv:2403.09060 (2024)."},{"key":"e_1_3_2_2_27_1","volume-title":"Datasets for large language models: A comprehensive survey. arXiv preprint arXiv:2402.18041","author":"Liu Yang","year":"2024","unstructured":"Yang Liu, Jiahuan Cao, Chongyu Liu, Kai Ding, and Lianwen Jin. 2024. Datasets for large language models: A comprehensive survey. arXiv preprint arXiv:2402.18041 (2024)."},{"key":"e_1_3_2_2_28_1","volume-title":"Ensuring Safe and High-Quality Outputs: A Guideline Library Approach for Language Models. CoRR","author":"Luo Yi","year":"2024","unstructured":"Yi Luo, Zhenghao Lin, Yuhao Zhang, Jiashuo Sun, Chen Lin, Chengjin Xu, Xiangdong Su, Yelong Shen, Jian Guo, and Yeyun Gong. 2024. Ensuring Safe and High-Quality Outputs: A Guideline Library Approach for Language Models. CoRR, Vol. abs\/2403.11838 (2024). 
showeprint[arXiv]2403.11838"},{"key":"e_1_3_2_2_29_1","volume-title":"Query rewriting for retrieval-augmented large language models. arXiv preprint arXiv:2305.14283","author":"Ma Xinbei","year":"2023","unstructured":"Xinbei Ma, Yeyun Gong, Pengcheng He, Hai Zhao, and Nan Duan. 2023. Query rewriting for retrieval-augmented large language models. arXiv preprint arXiv:2305.14283 (2023)."},{"key":"e_1_3_2_2_30_1","volume-title":"When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. arXiv preprint arXiv:2212.10511","author":"Mallen Alex","year":"2022","unstructured":"Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. 2022. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. arXiv preprint arXiv:2212.10511 (2022)."},{"key":"e_1_3_2_2_31_1","unstructured":"OpenAI. 2022. OpenAI: Introducing ChatGPT. (2022). https:\/\/openai.com\/blog\/chatgpt"},{"key":"e_1_3_2_2_32_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. CoRR Vol. abs\/2303.08774 (2023). doi:10.48550\/arXiv.2303.08774 showeprint[arXiv]2303.08774"},{"key":"e_1_3_2_2_33_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Pourreza Mohammadreza","year":"2024","unstructured":"Mohammadreza Pourreza and Davood Rafiei. 2024. Din-sql: Decomposed in-context learning of text-to-sql with self-correction. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_2_34_1","volume-title":"Measuring and narrowing the compositionality gap in language models. arXiv preprint arXiv:2210.03350","author":"Press Ofir","year":"2022","unstructured":"Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2022. Measuring and narrowing the compositionality gap in language models. 
arXiv preprint arXiv:2210.03350 (2022)."},{"key":"e_1_3_2_2_35_1","volume-title":"Know what you don't know: Unanswerable questions for SQuAD. arXiv preprint arXiv:1806.03822","author":"Rajpurkar Pranav","year":"2018","unstructured":"Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable questions for SQuAD. arXiv preprint arXiv:1806.03822 (2018)."},{"key":"e_1_3_2_2_36_1","unstructured":"Reddit. 2015. Reddit Comments Dataset. https:\/\/www.reddit.com\/r\/datasets\/."},{"key":"e_1_3_2_2_37_1","volume-title":"Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom.","author":"Schick Timo","year":"2024","unstructured":"Timo Schick, Jane Dwivedi-Yu, Roberto Dess\u00ec, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2024. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_2_38_1","volume-title":"Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294","author":"Shao Zhihong","year":"2023","unstructured":"Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294 (2023)."},{"key":"e_1_3_2_2_39_1","volume-title":"Safety Assessment of Chinese Large Language Models. arXiv preprint arXiv:2304.10436","author":"Sun Hao","year":"2023","unstructured":"Hao Sun, Zhexin Zhang, Jiawen Deng, Jiale Cheng, and Minlie Huang. 2023b. Safety Assessment of Chinese Large Language Models. arXiv preprint arXiv:2304.10436 (2023)."},{"key":"e_1_3_2_2_40_1","volume-title":"Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. 
arXiv preprint arXiv:2307.07697","author":"Sun Jiashuo","year":"2023","unstructured":"Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Heung-Yeung Shum, and Jian Guo. 2023a. Think-on-graph: Deep and responsible reasoning of large language model with knowledge graph. arXiv preprint arXiv:2307.07697 (2023)."},{"key":"e_1_3_2_2_41_1","volume-title":"Hashimoto","author":"Taori Rohan","year":"2023","unstructured":"Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford Alpaca: An Instruction-following LLaMA model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca."},{"key":"e_1_3_2_2_42_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023a. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_43_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023b. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-024-40231-1"},{"key":"e_1_3_2_2_45_1","volume-title":"Denny Zhou, et al.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, Vol. 
35 (2022), 24824-24837."},{"key":"e_1_3_2_2_46_1","unstructured":"Shaohua Wu Xudong Zhao Shenling Wang Jiangang Luo Lingjun Li Xi Chen Bing Zhao Wei Wang Tong Yu Rongguo Zhang et al. 2023b. YUAN 2.0: A Large Language Model with Localized Filtering-based Attention. arXiv preprint arXiv:2311.15786 (2023)."},{"key":"e_1_3_2_2_47_1","volume-title":"Smartplay: A benchmark for llms as intelligent agents. arXiv preprint arXiv:2310.01557","author":"Wu Yue","year":"2023","unstructured":"Yue Wu, Xuan Tang, Tom M Mitchell, and Yuanzhi Li. 2023a. Smartplay: A benchmark for llms as intelligent agents. arXiv preprint arXiv:2310.01557 (2023)."},{"key":"e_1_3_2_2_48_1","volume-title":"FOFO: A Benchmark to Evaluate LLMs' Format-Following Capability. arXiv preprint arXiv:2402.18667","author":"Xia Congying","year":"2024","unstructured":"Congying Xia, Chen Xing, Jiangshu Du, Xinyi Yang, Yihao Feng, Ran Xu, Wenpeng Yin, and Caiming Xiong. 2024. FOFO: A Benchmark to Evaluate LLMs' Format-Following Capability. arXiv preprint arXiv:2402.18667 (2024)."},{"key":"e_1_3_2_2_49_1","first-page":"95716","article-title":"Finben: A holistic financial benchmark for large language models","volume":"37","author":"Xie Qianqian","year":"2025","unstructured":"Qianqian Xie, Weiguang Han, Zhengyu Chen, Ruoyu Xiang, Xiao Zhang, Yueru He, Mengxi Xiao, Dong Li, Yongfu Dai, Duanyu Feng, et al. 2025. Finben: A holistic financial benchmark for large language models. Advances in Neural Information Processing Systems, Vol. 37 (2025), 95716-95743.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_50_1","volume-title":"Db-gpt: Empowering database interactions with private large language models. arXiv preprint arXiv:2312.17449","author":"Xue Siqiao","year":"2023","unstructured":"Siqiao Xue, Caigao Jiang, Wenhui Shi, Fangyin Cheng, Keting Chen, Hongjun Yang, Zhiping Zhang, Jianshan He, Hongyang Zhang, Ganglin Wei, et al. 2023. 
Db-gpt: Empowering database interactions with private large language models. arXiv preprint arXiv:2312.17449 (2023)."},{"key":"e_1_3_2_2_51_1","unstructured":"Aiyuan Yang Bin Xiao Bingning Wang Borong Zhang Ce Bian Chao Yin Chenxu Lv Da Pan Dian Wang Dong Yan et al. 2023. Baichuan 2: Open large-scale language models. arXiv preprint arXiv:2309.10305 (2023)."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.31193\/ssap.01.9787509752807"},{"key":"e_1_3_2_2_53_1","volume-title":"ReAct: Synergizing Reasoning and Acting in Language Models. In The Eleventh International Conference on Learning Representations, ICLR 2023","author":"Yao Shunyu","year":"2023","unstructured":"Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. 2023. ReAct: Synergizing Reasoning and Acting in Language Models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net. https:\/\/openreview.net\/pdf?id=WE_vluYUL-X"},{"key":"e_1_3_2_2_54_1","volume-title":"Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task. arXiv preprint arXiv:1809.08887","author":"Yu Tao","year":"2018","unstructured":"Tao Yu, Rui Zhang, Kai Yang, Michihiro Yasunaga, Dongxu Wang, Zifan Li, James Ma, Irene Li, Qingning Yao, Shanelle Roman, et al. 2018. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task. arXiv preprint arXiv:1809.08887 (2018)."},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"crossref","unstructured":"Mingze Yuan Peng Bao Jiajia Yuan Yunhao Shen Zifan Chen Yi Xie Jie Zhao Yang Chen Li Zhang Lin Shen et al. 2023. Large Language Models Illuminate a Progressive Pathway to Artificial Healthcare Assistant: A Review. 
arXiv preprint arXiv:2311.01918 (2023).","DOI":"10.1016\/j.medp.2024.100030"},{"key":"e_1_3_2_2_56_1","volume-title":"Benchmarking AI in Mental Health: A Critical Examination of LLMs Across Key Performance and Ethical Metrics. In International Conference on Pattern Recognition. Springer, 351-366","author":"Yuan Rui","year":"2024","unstructured":"Rui Yuan, Wanting Hao, and Chun Yuan. 2024. Benchmarking AI in Mental Health: A Critical Examination of LLMs Across Key Performance and Ethical Metrics. In International Conference on Pattern Recognition. Springer, 351-366."},{"key":"e_1_3_2_2_57_1","unstructured":"Aohan Zeng Xiao Liu Zhengxiao Du Zihan Wang Hanyu Lai Ming Ding Zhuoyi Yang Yifan Xu Wendi Zheng Xiao Xia et al. 2022. Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414 (2022)."},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"crossref","unstructured":"Hongbo Zhang Junying Chen Feng Jiang Fei Yu Zhihong Chen Jianquan Li Guiming Chen Xiangbo Wu Zhiyi Zhang Qingying Xiao et al. 2023. Huatuogpt towards taming language model to be a doctor. arXiv preprint arXiv:2305.15075 (2023).","DOI":"10.18653\/v1\/2023.findings-emnlp.725"},{"key":"e_1_3_2_2_59_1","volume-title":"RAGLAB: A Modular and Research-Oriented Unified Framework for Retrieval-Augmented Generation. arXiv preprint arXiv:2408.11381","author":"Zhang Xuanwang","year":"2024","unstructured":"Xuanwang Zhang, Yunze Song, Yidong Wang, Shuyun Tang, Xinfeng Li, Zhengran Zeng, Zhen Wu, Wei Ye, Wenyuan Xu, Yue Zhang, et al. 2024. RAGLAB: A Modular and Research-Oriented Unified Framework for Retrieval-Augmented Generation. arXiv preprint arXiv:2408.11381 (2024)."},{"key":"e_1_3_2_2_60_1","volume-title":"Llm as dba. arXiv preprint arXiv:2308.05481","author":"Zhou Xuanhe","year":"2023","unstructured":"Xuanhe Zhou, Guoliang Li, and Zhiyuan Liu. 2023a. Llm as dba. 
arXiv preprint arXiv:2308.05481 (2023)."},{"key":"e_1_3_2_2_61_1","volume-title":"D-bot: Database diagnosis system using large language models. arXiv preprint arXiv:2312.01454","author":"Zhou Xuanhe","year":"2023","unstructured":"Xuanhe Zhou, Guoliang Li, Zhaoyan Sun, Zhiyuan Liu, Weize Chen, Jianming Wu, Jiesi Liu, Ruohang Feng, and Guoyang Zeng. 2023b. D-bot: Database diagnosis system using large language models. arXiv preprint arXiv:2312.01454 (2023)."},{"key":"e_1_3_2_2_62_1","volume-title":"DB-GPT: Large Language Model Meets Database. Data Science and Engineering","author":"Zhou Xuanhe","year":"2024","unstructured":"Xuanhe Zhou, Zhaoyan Sun, and Guoliang Li. 2024. DB-GPT: Large Language Model Meets Database. Data Science and Engineering (2024), 1-10."}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Toronto ON Canada","acronym":"KDD '25"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3737405","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:27:31Z","timestamp":1755354451000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3737405"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":62,"alternative-id":["10.1145\/3711896.3737405","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3737405","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}