{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T22:40:18Z","timestamp":1768516818420,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":60,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,7,20]],"date-time":"2025-07-20T00:00:00Z","timestamp":1752969600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Education Bureau of Guangzhou Municipality"},{"name":"National Key R&D Program of China","award":["Grant No.2023YFF0725001"],"award-info":[{"award-number":["Grant No.2023YFF0725001"]}]},{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["Grant No.92370204"],"award-info":[{"award-number":["Grant No.92370204"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Guangzhou-HKUST(GZ) Joint Funding Program","award":["Grant No.2023A03J0008"],"award-info":[{"award-number":["Grant No.2023A03J0008"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,20]]},"DOI":"10.1145\/3690624.3709312","type":"proceedings-article","created":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T18:44:43Z","timestamp":1743792283000},"page":"1960-1971","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["LLM-Eraser: Optimizing Large Language Model Unlearning through Selective Pruning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-4549-3262","authenticated-orcid":false,"given":"Shengming","family":"Zhang","sequence":"first","affiliation":[{"name":"Chinese Academy of Medical Sciences &amp; Peking Union Medical College, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0894-9651","authenticated-orcid":false,"given":"Le","family":"Zhang","sequence":"additional","affiliation":[{"name":"Baidu Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2677-7021","authenticated-orcid":false,"given":"Jingbo","family":"Zhou","sequence":"additional","affiliation":[{"name":"Baidu Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7758-8904","authenticated-orcid":false,"given":"Zhi","family":"Zheng","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, Anhui, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6016-6465","authenticated-orcid":false,"given":"Hui","family":"Xiong","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]}],"member":"320","published-online":{"date-parts":[[2025,7,20]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-44041-1_114"},{"key":"e_1_3_2_1_2_1","first-page":"38176","article-title":"Fine-tuning language models to find agreement among humans with diverse preferences","volume":"35","author":"Bakker Michiel","year":"2022","unstructured":"Michiel Bakker, Martin Chadwick, Hannah Sheahan, Michael Tessler, Lucy Campbell-Gillingham, Jan Balaguer, Nat McAleese, Amelia Glaese, John Aslanides, Matt Botvinick, et al. 2022. Fine-tuning language models to find agreement among humans with diverse preferences. Advances in Neural Information Processing Systems, Vol. 35 (2022), 38176--38189.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_3_1","volume-title":"Mihai Christodorescu, Anupam Datta, Soheil Feizi, et al.","author":"Barrett Clark","year":"2023","unstructured":"Clark Barrett, Brad Boyd, Elie Bursztein, Nicholas Carlini, Brad Chen, Jihye Choi, Amrita Roy Chowdhury, Mihai Christodorescu, Anupam Datta, Soheil Feizi, et al. 2023. 
Identifying and mitigating the security risks of generative ai. Foundations and Trends\u00ae in Privacy and Security, Vol. 6, 1 (2023), 1--52."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6239"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP40001.2021.00019"},{"key":"e_1_3_2_1_6_1","volume-title":"Language Models are Few-Shot Learners. (2020). arxiv","author":"Brown Tom B.","year":"2020","unstructured":"Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. (2020). arxiv: 2005.14165 [cs.CL]"},{"key":"e_1_3_2_1_7_1","volume-title":"Yuanzhi Li, Scott Lundberg, et al.","author":"Bubeck S\u00e9bastien","year":"2023","unstructured":"S\u00e9bastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712 (2023)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2015.35"},{"key":"e_1_3_2_1_9_1","first-page":"22285","article-title":"One-shot neural backdoor erasing via adversarial weight masking","volume":"35","author":"Chai Shuwen","year":"2022","unstructured":"Shuwen Chai and Jinghui Chen. 2022. One-shot neural backdoor erasing via adversarial weight masking. Advances in Neural Information Processing Systems, Vol. 
35 (2022), 22285--22299.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_10_1","volume-title":"Charles Sutton, Sebastian Gehrmann, et al.","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)."},{"key":"e_1_3_2_1_11_1","unstructured":"Christopher Clark Kenton Lee Ming-Wei Chang Tom Kwiatkowski Michael Collins and Kristina Toutanova. 2019. BoolQ: Exploring the Surprising Difficulty of Natural Yes\/No Questions. In NAACL."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00644"},{"key":"e_1_3_2_1_13_1","volume-title":"Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innovations in Education and Teaching International","author":"Cotton Debby RE","year":"2023","unstructured":"Debby RE Cotton, Peter A Cotton, and J Reuben Shipway. 2023. Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innovations in Education and Teaching International (2023), 1--12."},{"key":"e_1_3_2_1_14_1","volume-title":"Editing factual knowledge in language models. arXiv preprint arXiv:2104.08164","author":"Cao Nicola De","year":"2021","unstructured":"Nicola De Cao, Wilker Aziz, and Ivan Titov. 2021. Editing factual knowledge in language models. arXiv preprint arXiv:2104.08164 (2021)."},{"key":"e_1_3_2_1_15_1","volume-title":"Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation. arXiv preprint arXiv:2310.12508","author":"Fan Chongyu","year":"2023","unstructured":"Chongyu Fan, Jiancheng Liu, Yihua Zhang, Dennis Wei, Eric Wong, and Sijia Liu. 2023. 
Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation. arXiv preprint arXiv:2310.12508 (2023)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599894"},{"key":"e_1_3_2_1_17_1","volume-title":"Practical unlearning for large language models. arXiv preprint arXiv:2407.10223","author":"Gao Chongyang","year":"2024","unstructured":"Chongyang Gao, Lixu Wang, Chenkai Weng, Xiao Wang, and Qi Zhu. 2024. Practical unlearning for large language models. arXiv preprint arXiv:2407.10223 (2024)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","unstructured":"Leo Gao Jonathan Tow Stella Biderman Sid Black Anthony DiPofi Charles Foster Laurence Golding Jeffrey Hsu Kyle McDonell Niklas Muennighoff Jason Phang Laria Reynolds Eric Tang Anish Thite Ben Wang Kevin Wang and Andy Zou. 2021. A framework for few-shot language model evaluation. https:\/\/doi.org\/10.5281\/zenodo.5371628","DOI":"10.5281\/zenodo.5371628"},{"key":"e_1_3_2_1_19_1","volume-title":"Knowledge unlearning for mitigating privacy risks in language models. arXiv preprint arXiv:2210.01504","author":"Jang Joel","year":"2022","unstructured":"Joel Jang, Dongkeun Yoon, Sohee Yang, Sungmin Cha, Moontae Lee, Lajanugen Logeswaran, and Minjoon Seo. 2022. Knowledge unlearning for mitigating privacy risks in language models. arXiv preprint arXiv:2210.01504 (2022)."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671742"},{"key":"e_1_3_2_1_21_1","volume-title":"Supervised contrastive learning. Advances in neural information processing systems","author":"Khosla Prannay","year":"2020","unstructured":"Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. 2020. Supervised contrastive learning. Advances in neural information processing systems, Vol. 
33 (2020), 18661--18673."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.716"},{"key":"e_1_3_2_1_23_1","unstructured":"Sijia Liu Yuanshun Yao Jinghan Jia Stephen Casper Nathalie Baracaldo Peter Hase Xiaojun Xu Yuguang Yao Hang Li Kush R Varshney et al. 2024b. Rethinking Machine Unlearning for Large Language Models. arXiv preprint arXiv:2402.08787 (2024)."},{"key":"e_1_3_2_1_24_1","volume-title":"Towards Safer Large Language Models through Machine Unlearning. arXiv preprint arXiv:2402.10058","author":"Liu Zheyuan","year":"2024","unstructured":"Zheyuan Liu, Guangyao Dou, Zhaoxuan Tan, Yijun Tian, and Meng Jiang. 2024a. Towards Safer Large Language Models through Machine Unlearning. arXiv preprint arXiv:2402.10058 (2024)."},{"key":"e_1_3_2_1_25_1","volume-title":"Quark: Controllable text generation with reinforced unlearning. Advances in neural information processing systems","author":"Lu Ximing","year":"2022","unstructured":"Ximing Lu, Sean Welleck, Jack Hessel, Liwei Jiang, Lianhui Qin, Peter West, Prithviraj Ammanabrolu, and Yejin Choi. 2022. Quark: Controllable text generation with reinforced unlearning. Advances in neural information processing systems, Vol. 35 (2022), 27591--27609."},{"key":"e_1_3_2_1_26_1","volume-title":"LLM-Pruner: On the Structural Pruning of Large Language Models. arXiv preprint arXiv:2305.11627","author":"Ma Xinyin","year":"2023","unstructured":"Xinyin Ma, Gongfan Fang, and Xinchao Wang. 2023. LLM-Pruner: On the Structural Pruning of Large Language Models. arXiv preprint arXiv:2305.11627 (2023)."},{"key":"e_1_3_2_1_27_1","volume-title":"Memory-assisted prompt editing to improve gpt-3 after deployment. arXiv preprint arXiv:2201.06009","author":"Madaan Aman","year":"2022","unstructured":"Aman Madaan, Niket Tandon, Peter Clark, and Yiming Yang. 2022. Memory-assisted prompt editing to improve gpt-3 after deployment. 
arXiv preprint arXiv:2201.06009 (2022)."},{"key":"e_1_3_2_1_28_1","volume-title":"Tofu: A task of fictitious unlearning for llms. arXiv preprint arXiv:2401.06121","author":"Maini Pratyush","year":"2024","unstructured":"Pratyush Maini, Zhili Feng, Avi Schwarzschild, Zachary C Lipton, and J Zico Kolter. 2024. Tofu: A task of fictitious unlearning for llms. arXiv preprint arXiv:2401.06121 (2024)."},{"key":"e_1_3_2_1_29_1","volume-title":"The radicalization risks of GPT-3 and advanced neural language models. arXiv preprint arXiv:2009.06807","author":"McGuffie Kris","year":"2020","unstructured":"Kris McGuffie and Alex Newhouse. 2020. The radicalization risks of GPT-3 and advanced neural language models. arXiv preprint arXiv:2009.06807 (2020)."},{"key":"e_1_3_2_1_30_1","first-page":"17359","article-title":"Locating and editing factual associations in GPT","volume":"35","author":"Meng Kevin","year":"2022","unstructured":"Kevin Meng, David Bau, Alex Andonian, and Yonatan Belinkov. 2022a. Locating and editing factual associations in GPT. Advances in Neural Information Processing Systems, Vol. 35 (2022), 17359--17372.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_31_1","volume-title":"Alex Andonian, Yonatan Belinkov, and David Bau.","author":"Meng Kevin","year":"2022","unstructured":"Kevin Meng, Arnab Sen Sharma, Alex Andonian, Yonatan Belinkov, and David Bau. 2022b. Mass-editing memory in a transformer. arXiv preprint arXiv:2210.07229 (2022)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"crossref","unstructured":"Todor Mihaylov Peter Clark Tushar Khot and Ashish Sabharwal. 2018. Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering. In EMNLP.","DOI":"10.18653\/v1\/D18-1260"},{"key":"e_1_3_2_1_33_1","volume-title":"International Conference on Machine Learning. 
PMLR, 15817--15831","author":"Mitchell Eric","year":"2022","unstructured":"Eric Mitchell, Charles Lin, Antoine Bosselut, Christopher D Manning, and Chelsea Finn. 2022. Memory-based model editing at scale. In International Conference on Machine Learning. PMLR, 15817--15831."},{"key":"e_1_3_2_1_34_1","volume-title":"Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440","author":"Molchanov Pavlo","year":"2016","unstructured":"Pavlo Molchanov, Stephen Tyree, Tero Karras, Timo Aila, and Jan Kautz. 2016. Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016)."},{"key":"e_1_3_2_1_35_1","volume-title":"M Saiful Bari, Sheng Shen, Zheng-Xin Yong, Hailey Schoelkopf, et al.","author":"Muennighoff Niklas","year":"2022","unstructured":"Niklas Muennighoff, Thomas Wang, Lintang Sutawika, Adam Roberts, Stella Biderman, Teven Le Scao, M Saiful Bari, Sheng Shen, Zheng-Xin Yong, Hailey Schoelkopf, et al. 2022. Crosslingual generalization through multitask finetuning. arXiv preprint arXiv:2211.01786 (2022)."},{"key":"e_1_3_2_1_36_1","volume-title":"Phi Le Nguyen, Alan Wee-Chung Liew, Hongzhi Yin, and Quoc Viet Hung Nguyen.","author":"Nguyen Thanh Tam","year":"2022","unstructured":"Thanh Tam Nguyen, Thanh Trung Huynh, Phi Le Nguyen, Alan Wee-Chung Liew, Hongzhi Yin, and Quoc Viet Hung Nguyen. 2022. A survey of machine unlearning. arXiv preprint arXiv:2209.02299 (2022)."},{"key":"e_1_3_2_1_37_1","volume-title":"Meta-KD: A meta knowledge distillation framework for language model compression across domains. arXiv preprint arXiv:2012.01266","author":"Pan Haojie","year":"2020","unstructured":"Haojie Pan, Chengyu Wang, Minghui Qiu, Yichang Zhang, Yaliang Li, and Jun Huang. 2020. Meta-KD: A meta knowledge distillation framework for language model compression across domains. 
arXiv preprint arXiv:2012.01266 (2020)."},{"key":"e_1_3_2_1_38_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Rafailov Rafael","year":"2024","unstructured":"Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2024. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_39_1","volume-title":"a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108","author":"Sanh Victor","year":"2019","unstructured":"Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)."},{"key":"e_1_3_2_1_40_1","volume-title":"Contrastive distillation on intermediate representations for language model compression. arXiv preprint arXiv:2009.14167","author":"Sun Siqi","year":"2020","unstructured":"Siqi Sun, Zhe Gan, Yu Cheng, Yuwei Fang, Shuohang Wang, and Jingjing Liu. 2020. Contrastive distillation on intermediate representations for language model compression. arXiv preprint arXiv:2009.14167 (2020)."},{"key":"e_1_3_2_1_41_1","volume-title":"Hashimoto","author":"Taori Rohan","year":"2023","unstructured":"Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford Alpaca: An Instruction-following LLaMA model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca."},{"key":"e_1_3_2_1_42_1","volume-title":"To forget or not? towards practical knowledge unlearning for large language models. arXiv preprint arXiv:2407.01920","author":"Tian Bozhong","year":"2024","unstructured":"Bozhong Tian, Xiaozhuan Liang, Siyuan Cheng, Qingbin Liu, Mengru Wang, Dianbo Sui, Xi Chen, Huajun Chen, and Ningyu Zhang. 2024. To forget or not? 
towards practical knowledge unlearning for large language models. arXiv preprint arXiv:2407.01920 (2024)."},{"key":"e_1_3_2_1_43_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_44_1","volume-title":"Kga: A general machine unlearning framework based on knowledge gap alignment. arXiv preprint arXiv:2305.06535","author":"Wang Lingzhi","year":"2023","unstructured":"Lingzhi Wang, Tong Chen, Wei Yuan, Xingshan Zeng, Kam-Fai Wong, and Hongzhi Yin. 2023. Kga: A general machine unlearning framework based on knowledge gap alignment. arXiv preprint arXiv:2305.06535 (2023)."},{"key":"e_1_3_2_1_45_1","unstructured":"Jason Wei Yi Tay Rishi Bommasani Colin Raffel Barret Zoph Sebastian Borgeaud Dani Yogatama Maarten Bosma Denny Zhou Donald Metzler et al. 2022a. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682 (2022)."},{"key":"e_1_3_2_1_46_1","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022b. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, Vol. 35 (2022), 24824--24837.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_47_1","first-page":"16913","article-title":"Adversarial neuron pruning purifies backdoored deep models","volume":"34","author":"Wu Dongxian","year":"2021","unstructured":"Dongxian Wu and Yisen Wang. 
2021. Adversarial neuron pruning purifies backdoored deep models. Advances in Neural Information Processing Systems, Vol. 34 (2021), 16913--16925.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i8.28769"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11280-024-01291-2"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21408"},{"key":"e_1_3_2_1_51_1","volume-title":"Editing large language models: Problems, methods, and opportunities. arXiv preprint arXiv:2305.13172","author":"Yao Yunzhi","year":"2023","unstructured":"Yunzhi Yao, Peng Wang, Bozhong Tian, Siyuan Cheng, Zhoubo Li, Shumin Deng, Huajun Chen, and Ningyu Zhang. 2023a. Editing large language models: Problems, methods, and opportunities. arXiv preprint arXiv:2305.13172 (2023)."},{"key":"e_1_3_2_1_52_1","volume-title":"Large language model unlearning. arXiv preprint arXiv:2310.10683","author":"Yao Yuanshun","year":"2023","unstructured":"Yuanshun Yao, Xiaojun Xu, and Yang Liu. 2023b. Large language model unlearning. arXiv preprint arXiv:2310.10683 (2023)."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.375"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3604237.3626839"},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1472"},{"key":"e_1_3_2_1_56_1","unstructured":"Andy Zeng Maria Attarian Brian Ichter Krzysztof Choromanski Adrian Wong Stefan Welker Federico Tombari Aveek Purohit Michael Ryoo Vikas Sindhwani et al. 2022. Socratic models: Composing zero-shot multimodal reasoning with language. arXiv preprint arXiv:2204.00598 (2022)."},{"key":"e_1_3_2_1_57_1","volume-title":"E-BERT: A phrase and product knowledge enhanced language model for e-commerce. 
arXiv preprint arXiv:2009.02835","author":"Zhang Denghui","year":"2020","unstructured":"Denghui Zhang, Zixuan Yuan, Yanchi Liu, Fuzhen Zhuang, Haifeng Chen, and Hui Xiong. 2020. E-BERT: A phrase and product knowledge enhanced language model for e-commerce. arXiv preprint arXiv:2009.02835 (2020)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539155"},{"key":"e_1_3_2_1_59_1","volume-title":"Good One: Seeking High-potential Startups using Heterogeneous Venture Information Networks.","author":"Zhang Shengming","year":"2023","unstructured":"Shengming Zhang, Hao Zhong, Yong Ge, Hui Xiong, et al. 2023. Bring Me a Good One: Seeking High-potential Startups using Heterogeneous Venture Information Networks. (2023)."},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645358"}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Toronto ON Canada","acronym":"KDD '25","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining 
V.1"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709312","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3690624.3709312","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T15:44:38Z","timestamp":1755359078000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709312"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,20]]},"references-count":60,"alternative-id":["10.1145\/3690624.3709312","10.1145\/3690624"],"URL":"https:\/\/doi.org\/10.1145\/3690624.3709312","relation":{},"subject":[],"published":{"date-parts":[[2025,7,20]]},"assertion":[{"value":"2025-07-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}