{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T02:26:38Z","timestamp":1765506398284,"version":"3.48.0"},"publisher-location":"New York, NY, USA","reference-count":34,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,10]]},"DOI":"10.1145\/3746252.3761613","type":"proceedings-article","created":{"date-parts":[[2025,11,8]],"date-time":"2025-11-08T00:52:37Z","timestamp":1762563157000},"page":"6461-6465","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["ECKGBench: Benchmarking Large Language Models in E-commerce Leveraging Knowledge Graph"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1995-3381","authenticated-orcid":false,"given":"Langming","family":"Liu","sequence":"first","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1039-1119","authenticated-orcid":false,"given":"Haibin","family":"Chen","sequence":"additional","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6051-8659","authenticated-orcid":false,"given":"Yuhao","family":"Wang","sequence":"additional","affiliation":[{"name":"City University of Hong Kong, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9459-6488","authenticated-orcid":false,"given":"Yujin","family":"Yuan","sequence":"additional","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2976-6256","authenticated-orcid":false,"given":"Shilei","family":"Liu","sequence":"additional","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-3800-7543","authenticated-orcid":false,"given":"Wenbo","family":"Su","sequence":"additional","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2926-4416","authenticated-orcid":false,"given":"Xiangyu","family":"Zhao","sequence":"additional","affiliation":[{"name":"City University of Hong Kong, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4037-6315","authenticated-orcid":false,"given":"Bo","family":"Zheng","sequence":"additional","affiliation":[{"name":"Taobao &amp; Tmall Group of Alibaba, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,11,10]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608857"},{"key":"e_1_3_2_1_2_1","volume-title":"Translating embeddings for modeling multi-relational data. Advances in neural information processing systems","author":"Bordes Antoine","year":"2013","unstructured":"Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, and Oksana Yakhnenko. 2013. Translating embeddings for modeling multi-relational data. Advances in neural information processing systems, Vol. 26 (2013)."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3711896.3737374"},{"key":"e_1_3_2_1_4_1","volume-title":"Beyond factuality: A comprehensive evaluation of large language models as knowledge generators. arXiv preprint arXiv:2310.07289","author":"Chen Liang","year":"2023","unstructured":"Liang Chen, Yang Deng, Yatao Bian, Zeyu Qin, Bingzhe Wu, Tat-Seng Chua, and Kam-Fai Wong. 2023. Beyond factuality: A comprehensive evaluation of large language models as knowledge generators. arXiv preprint arXiv:2310.07289 (2023)."},{"key":"e_1_3_2_1_5_1","volume-title":"Minghao Liu, Tianqing Fang, Jiaxin Bai, Junxian He, and Yangqiu Song.","author":"Ding Wenxuan","year":"2024","unstructured":"Wenxuan Ding, Weiqi Wang, Sze Heng Douglas Kwok, Minghao Liu, Tianqing Fang, Jiaxin Bai, Junxian He, and Yangqiu Song. 2024. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. arXiv preprint arXiv:2406.10173 (2024)."},{"key":"e_1_3_2_1_6_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Dong Qingxiu","year":"2024","unstructured":"Qingxiu Dong, Jingjing Xu, Lingpeng Kong, Zhifang Sui, and Lei Li. 2024. Statistical Knowledge Assessment for Large Language Models. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_7_1","volume-title":"Revisiting text decomposition methods for NLI-based factuality scoring of summaries. arXiv preprint arXiv:2211.16853","author":"Glover John","year":"2022","unstructured":"John Glover, Federico Fancellu, Vasudevan Jagannathan, Matthew R Gormley, and Thomas Schaaf. 2022. Revisiting text decomposition methods for NLI-based factuality scoring of summaries. arXiv preprint arXiv:2211.16853 (2022)."},{"key":"e_1_3_2_1_8_1","volume-title":"International conference on machine learning. PMLR, 3929-3938","author":"Guu Kelvin","year":"2020","unstructured":"Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929-3938."},{"key":"e_1_3_2_1_9_1","volume-title":"Yaliang Li, and Ji-Rong Wen.","author":"Jiang Jinhao","year":"2023","unstructured":"Jinhao Jiang, Kun Zhou, Wayne Xin Zhao, Yaliang Li, and Ji-Rong Wen. 2023. ReasoningLM: Enabling Structural Subgraph Reasoning in Pre-trained Language Models for Question Answering over Knowledge Graph. arXiv preprint arXiv:2401.00158 (2023)."},{"key":"e_1_3_2_1_10_1","unstructured":"Yilun Jin Zheng Li Chenwei Zhang Tianyu Cao Yifan Gao Pratik Jayarao Mao Li Xin Liu Ritesh Sarkhel Xianfeng Tang et al. 2024. Shopping MMLU: A Massive Multi-Task Online Shopping Benchmark for Large Language Models. arXiv preprint arXiv:2410.20745 (2024)."},{"key":"e_1_3_2_1_11_1","first-page":"34586","article-title":"Factuality enhanced language models for open-ended text generation","volume":"35","author":"Lee Nayeon","year":"2022","unstructured":"Nayeon Lee, Wei Ping, Peng Xu, Mostofa Patwary, Pascale N Fung, Mohammad Shoeybi, and Bryan Catanzaro. 2022. Factuality enhanced language models for open-ended text generation. Advances in Neural Information Processing Systems, Vol. 35 (2022), 34586-34599.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.397"},{"key":"e_1_3_2_1_13_1","unstructured":"Aixin Liu Bei Feng Bing Xue Bingxuan Wang Bochao Wu Chengda Lu Chenggang Zhao Chengqi Deng Chenyu Zhang Chong Ruan et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437 (2024)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591717"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3711896.3737385"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3711896.3737250"},{"key":"e_1_3_2_1_17_1","volume-title":"Evaluating the Factuality of Large Language Models using Large-Scale Knowledge Graphs. arXiv preprint arXiv:2404.00942","author":"Liu Xiaoze","year":"2024","unstructured":"Xiaoze Liu, Feijie Wu, Tianyang Xu, Zhuo Chen, Yichi Zhang, Xiaoqian Wang, and Jing Gao. 2024b. Evaluating the Factuality of Large Language Models using Large-Scale Knowledge Graphs. arXiv preprint arXiv:2404.00942 (2024)."},{"key":"e_1_3_2_1_18_1","volume-title":"Multi-stage prompting for knowledgeable dialogue generation. arXiv preprint arXiv:2203.08745","author":"Liu Zihan","year":"2022","unstructured":"Zihan Liu, Mostofa Patwary, Ryan Prenger, Shrimai Prabhumoye, Wei Ping, Mohammad Shoeybi, and Bryan Catanzaro. 2022. Multi-stage prompting for knowledgeable dialogue generation. arXiv preprint arXiv:2203.08745 (2022)."},{"key":"e_1_3_2_1_19_1","volume-title":"Reasoning on graphs: Faithful and interpretable large language model reasoning. arXiv preprint arXiv:2310.01061","author":"Luo Linhao","year":"2023","unstructured":"Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2023. Reasoning on graphs: Faithful and interpretable large language model reasoning. arXiv preprint arXiv:2310.01061 (2023)."},{"key":"e_1_3_2_1_20_1","volume-title":"NeurIPS Efficient Natural Language and Speech Processing Workshop.","author":"Muhamed Aashiq","year":"2021","unstructured":"Aashiq Muhamed, Iman Keivanloo, Sujan Perera, James Mracek, Yi Xu, Qingjun Cui, Santosh Rajagopalan, Belinda Zeng, and Trishul Chilimbi. 2021. CTR-BERT: Cost-effective knowledge distillation for billion-parameter teacher models. In NeurIPS Efficient Natural Language and Speech Processing Workshop."},{"key":"e_1_3_2_1_21_1","unstructured":"OpenAI. 2023. GPT-4 technical report. (2023)."},{"key":"e_1_3_2_1_22_1","volume-title":"Hanwen Zha, Yue Liu, and Xin Luna Dong.","author":"Sun Kai","year":"2023","unstructured":"Kai Sun, Yifan Ethan Xu, Hanwen Zha, Yue Liu, and Xin Luna Dong. 2023. Head-to-tail: How knowledgeable are large language models (llm)? AKA will llms replace knowledge graphs? arXiv preprint arXiv:2308.10168 (2023)."},{"key":"e_1_3_2_1_23_1","volume-title":"Rotate: Knowledge graph embedding by relational rotation in complex space. arXiv preprint arXiv:1902.10197","author":"Sun Zhiqing","year":"2019","unstructured":"Zhiqing Sun, Zhi-Hong Deng, Jian-Yun Nie, and Jian Tang. 2019. Rotate: Knowledge graph embedding by relational rotation in complex space. arXiv preprint arXiv:1902.10197 (2019)."},{"key":"e_1_3_2_1_24_1","volume-title":"Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al.","author":"Team Gemini","year":"2024","unstructured":"Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, et al., 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530 (2024)."},{"key":"e_1_3_2_1_25_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al., 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"Denny Zhou, et al.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al., 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, Vol. 35 (2022), 24824-24837."},{"key":"e_1_3_2_1_27_1","volume-title":"C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597","author":"Xiao Shitao","year":"2023","unstructured":"Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighof. 2023. C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597 (2023)."},{"key":"e_1_3_2_1_28_1","unstructured":"An Yang Anfeng Li Baosong Yang Beichen Zhang Binyuan Hui Bo Zheng Bowen Yu Chang Gao Chengen Huang Chenxu Lv et al. 2025. Qwen3 technical report. arXiv preprint arXiv:2505.09388 (2025)."},{"key":"e_1_3_2_1_29_1","unstructured":"An Yang Baosong Yang Binyuan Hui Bo Zheng Bowen Yu Chang Zhou Chengpeng Li Chengyuan Li Dayiheng Liu Fei Huang et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671 (2024)."},{"key":"e_1_3_2_1_30_1","first-page":"37309","article-title":"Deep bidirectional language-knowledge graph pretraining","volume":"35","author":"Yasunaga Michihiro","year":"2022","unstructured":"Michihiro Yasunaga, Antoine Bosselut, Hongyu Ren, Xikun Zhang, Christopher D Manning, Percy S Liang, and Jure Leskovec. 2022. Deep bidirectional language-knowledge graph pretraining. Advances in Neural Information Processing Systems, Vol. 35 (2022), 37309-37323.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_31_1","volume-title":"Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652","author":"Young Alex","year":"2024","unstructured":"Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al., 2024. Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652 (2024)."},{"key":"e_1_3_2_1_32_1","volume-title":"Folkscope: Intention knowledge graph construction for e-commerce commonsense discovery. arXiv preprint arXiv:2211.08316","author":"Yu Changlong","year":"2022","unstructured":"Changlong Yu, Weiqi Wang, Xin Liu, Jiaxin Bai, Yangqiu Song, Zheng Li, Yifan Gao, Tianyu Cao, and Bing Yin. 2022b. Folkscope: Intention knowledge graph construction for e-commerce commonsense discovery. arXiv preprint arXiv:2211.08316 (2022)."},{"key":"e_1_3_2_1_33_1","volume-title":"Generate rather than retrieve: Large language models are strong context generators. arXiv preprint arXiv:2209.10063","author":"Yu Wenhao","year":"2022","unstructured":"Wenhao Yu, Dan Iter, Shuohang Wang, Yichong Xu, Mingxuan Ju, Soumya Sanyal, Chenguang Zhu, Michael Zeng, and Meng Jiang. 2022a. Generate rather than retrieve: Large language models are strong context generators. arXiv preprint arXiv:2209.10063 (2022)."},{"key":"e_1_3_2_1_34_1","unstructured":"Yue Zhang Yafu Li Leyang Cui Deng Cai Lemao Liu Tingchen Fu Xinting Huang Enbo Zhao Yu Zhang Yulong Chen et al. 2023. Siren's song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219 (2023)."}],"event":{"name":"CIKM '25: The 34th ACM International Conference on Information and Knowledge Management","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Seoul Republic of Korea","acronym":"CIKM '25"},"container-title":["Proceedings of the 34th ACM International Conference on Information and Knowledge Management"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746252.3761613","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T02:25:03Z","timestamp":1765506303000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746252.3761613"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,10]]},"references-count":34,"alternative-id":["10.1145\/3746252.3761613","10.1145\/3746252"],"URL":"https:\/\/doi.org\/10.1145\/3746252.3761613","relation":{},"subject":[],"published":{"date-parts":[[2025,11,10]]},"assertion":[{"value":"2025-11-10","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}