{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T20:42:44Z","timestamp":1768423364067,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":18,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T00:00:00Z","timestamp":1729468800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Postdoctoral Fellowship Program of CPSF","award":["GZC20232873"],"award-info":[{"award-number":["GZC20232873"]}]},{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376262"],"award-info":[{"award-number":["62376262"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2024M753398"],"award-info":[{"award-number":["2024M753398"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022YFF0902100"],"award-info":[{"award-number":["2022YFF0902100"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shenzhen Basic Research Foundation","award":["JCYJ20210324115614039"],"award-info":[{"award-number":["JCYJ20210324115614039"]}]},{"name":"GuangDong Basic and Applied Basic Research Foundation","award":["2023A1515110718, 2024A1515012003 and 2024A1515030166"],"award-info":[{"award-number":["2023A1515110718, 2024A1515012003 and 2024A1515030166"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,21]]},"DOI":"10.1145\/3627673.3679219","type":"proceedings-article","created":{"date-parts":[[2024,10,20]],"date-time":"2024-10-20T19:34:21Z","timestamp":1729452861000},"page":"5299-5303","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["DeliLaw: A Chinese Legal Counselling System Based on a Large Language Model"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-8646-0613","authenticated-orcid":false,"given":"Nan","family":"Xie","sequence":"first","affiliation":[{"name":"University of Chinese Academy of Sciences &amp; Shenzhen Institute of Advanced Technology, CAS, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9043-9963","authenticated-orcid":false,"given":"Yuelin","family":"Bai","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology, CAS, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-5099-8126","authenticated-orcid":false,"given":"Hengyuan","family":"Gao","sequence":"additional","affiliation":[{"name":"Hebei University, Baoding, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5788-188X","authenticated-orcid":false,"given":"Ziqiang","family":"Xue","sequence":"additional","affiliation":[{"name":"Hebei University, Baoding, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2971-9572","authenticated-orcid":false,"given":"Feiteng","family":"Fang","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-4467-5389","authenticated-orcid":false,"given":"Qixuan","family":"Zhao","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5062-4248","authenticated-orcid":false,"given":"Zhijian","family":"Li","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6198-0035","authenticated-orcid":false,"given":"Liang","family":"Zhu","sequence":"additional","affiliation":[{"name":"South University of Science and Technology of China, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4986-4446","authenticated-orcid":false,"given":"Shiwen","family":"Ni","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology, CAS &amp; SIAT-DELI AI and Law Joint Lab, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7345-5071","authenticated-orcid":false,"given":"Min","family":"Yang","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology, CAS &amp; SIAT-DELI AI and Law Joint Lab, Shenzhen, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,21]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Ebtesam Almazrouei Hamza Alobeidli Abdulaziz Alshamsi Alessandro Cappelli Ruxandra Cojocaru Merouane Debbah Etienne Goffinet Daniel Heslow Julien Launay Quentin Malartic et al. 2023. Falcon-40B: an open large language model with state-of-the-art performance. Technical Report. Technical report Technology Innovation Institute."},{"key":"e_1_3_2_1_2_1","volume-title":"Instruction mining: High- quality instruction data selection for large language models. arXiv preprint arXiv:2307.06290","author":"Cao Yihan","year":"2023","unstructured":"Yihan Cao, Yanbin Kang, and Lichao Sun. 2023. Instruction mining: High- quality instruction data selection for large language models. arXiv preprint arXiv:2307.06290 (2023)."},{"key":"e_1_3_2_1_3_1","volume-title":"et al","author":"Chiang WL","year":"2023","unstructured":"WL Chiang, Z Li, Z Lin, Y Sheng, Z Wu, H Zhang, L Zheng, S Zhuang, Y Zhuang, JE Gonzalez, et al . 2023. Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality."},{"key":"e_1_3_2_1_4_1","volume-title":"Chatlaw: Open- source legal large language model with integrated external knowledge bases. arXiv preprint arXiv:2306.16092","author":"Cui Jiaxi","year":"2023","unstructured":"Jiaxi Cui, Zongjian Li, Yang Yan, Bohua Chen, and Li Yuan. 2023. Chatlaw: Open- source legal large language model with integrated external knowledge bases. arXiv preprint arXiv:2306.16092 (2023)."},{"key":"e_1_3_2_1_5_1","volume-title":"Promptagator: Few-shot dense retrieval from 8 examples. arXiv preprint arXiv:2209.11755","author":"Dai Zhuyun","year":"2022","unstructured":"Zhuyun Dai, Vincent Y Zhao, Ji Ma, Yi Luan, Jianmo Ni, Jing Lu, Anton Bakalov, Kelvin Guu, Keith B Hall, and Ming-Wei Chang. 2022. Promptagator: Few-shot dense retrieval from 8 examples. 
arXiv preprint arXiv:2209.11755 (2022)."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.26"},{"key":"e_1_3_2_1_7_1","unstructured":"Hongcheng Yusheng Liu Yutong Liao Yuhao Meng and Wang. 2023. LawGPT: Language Model for Chinese Legal Dialogue. https:\/\/github.com\/LiuHC0428\/ LAW_GPT."},{"key":"e_1_3_2_1_9_1","volume-title":"Retromae: Pre-training retrieval-oriented transformers via masked auto-encoder. arXiv preprint arXiv:2205.12035","author":"Liu Zheng","year":"2022","unstructured":"Zheng Liu and Yingxia Shao. 2022. Retromae: Pre-training retrieval-oriented transformers via masked auto-encoder. arXiv preprint arXiv:2205.12035 (2022)."},{"key":"e_1_3_2_1_11_1","volume-title":"Md Arafat Sultan, and Christopher Potts","author":"Saad-Falcon Jon","year":"2023","unstructured":"Jon Saad-Falcon, Omar Khattab, Keshav Santhanam, Radu Florian, Martin Franz, Salim Roukos, Avirup Sil, Md Arafat Sultan, and Christopher Potts. 2023. UDAPDR: Unsupervised Domain Adaptation via LLM Prompting and Distillation of Rerankers. arXiv preprint arXiv:2303.00807 (2023)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.is.2021.101967"},{"key":"e_1_3_2_1_13_1","volume-title":"MOSS: Training Conversational Language Models from Synthetic Data.","author":"Sun Tianxiang","year":"2023","unstructured":"Tianxiang Sun, Xiaotian Zhang, Zhengfu He, Peng Li, Qinyuan Cheng, Hang Yan, Xiangyang Liu, Yunfan Shao, Qiong Tang, Xingjian Zhao, Ke Chen, Yining Zheng, Zhejian Zhou, Ruixiao Li, Jun Zhan, Yunhua Zhou, Linyang Li, Xiaogui Yang, Lingling Wu, Zhangyue Yin, Xuanjing Huang, and Xipeng Qiu. 2023. MOSS: Training Conversational Language Models from Synthetic Data. (2023)."},{"key":"e_1_3_2_1_14_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_15_1","volume-title":"Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhos- ale, et al.","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yas- mine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhos- ale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3448016.3457550"},{"key":"e_1_3_2_1_17_1","volume-title":"C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597","author":"Xiao Shitao","year":"2023","unstructured":"Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighof. 2023. C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597 (2023)."},{"key":"e_1_3_2_1_18_1","volume-title":"Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244","author":"Xu Can","year":"2023","unstructured":"Can Xu, Qingfeng Sun, Kai Zheng, Xiubo Geng, Pu Zhao, Jiazhan Feng, Chongyang Tao, and Daxin Jiang. 2023. Wizardlm: Empowering large language models to follow complex instructions. 
arXiv preprint arXiv:2304.12244 (2023)."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"crossref","unstructured":"Hongbo Zhang Junying Chen Feng Jiang Fei Yu Zhihong Chen Jianquan Li Guiming Chen Xiangbo Wu Zhiyi Zhang Qingying Xiao et al. 2023. HuatuoGPT towards Taming Language Model to Be a Doctor. arXiv preprint arXiv:2305.15075 (2023).","DOI":"10.18653\/v1\/2023.findings-emnlp.725"},{"key":"e_1_3_2_1_20_1","volume-title":"Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206","author":"Zhou Chunting","year":"2023","unstructured":"Chunting Zhou, Pengfei Liu, Puxin Xu, Srini Iyer, Jiao Sun, Yuning Mao, Xuezhe Ma, Avia Efrat, Ping Yu, Lili Yu, et al. 2023. Lima: Less is more for alignment. arXiv preprint arXiv:2305.11206 (2023)."}],"event":{"name":"CIKM '24: The 33rd ACM International Conference on Information and Knowledge Management","location":"Boise ID USA","acronym":"CIKM '24","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 33rd ACM International Conference on Information and Knowledge Management"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627673.3679219","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3627673.3679219","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:03:28Z","timestamp":1750291408000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627673.3679219"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,21]]},"references-count":18,"alternative-id":["10.1145\/3627673.3679219","10.1145\/3627673"],"URL":"https:\/\/doi.org\/10.1145\/3627673.3679219","relation":{},"subject":[],"published":{"date-parts":[[2024,10,21]]},"assertion":[{"value":"2024-10-21","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
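
A minimal sketch of how a record in this shape can be retrieved and summarized with Python, assuming the public Crossref REST API endpoint https://api.crossref.org/works/<DOI> and the third-party "requests" package; the field names used ("message", "title", "author", "published", "references-count") are the ones that appear in the JSON above, and the printed summary is only an illustration, not part of the record.

import requests

# DOI of the work described by the record above (DeliLaw, CIKM '24).
DOI = "10.1145/3627673.3679219"

# Fetch the work record; the payload uses the same {"status", "message-type", "message"} envelope as above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Print a short human-readable summary drawn from the fields shown in the record.
print(work["title"][0])
print("Authors:", ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", [])))
print("Published:", "-".join(str(part) for part in work["published"]["date-parts"][0]))
print("Deposited references:", work.get("references-count", 0))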