{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T23:31:21Z","timestamp":1772148681473,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":42,"publisher":"ACM","funder":[{"name":"Ministry of Trade, Industry and Energy (MOTIE, Korea)","award":["RS-2025-02314885"],"award-info":[{"award-number":["RS-2025-02314885"]}]},{"name":"Institute of Information & Communications Technology Planning & Evaluation (IITP)","award":["IITP-2026-RS-2020-II201819"],"award-info":[{"award-number":["IITP-2026-RS-2020-II201819"]}]},{"name":"Institute of Information & Communications Technology Planning & Evaluation (IITP)","award":["IITP-2026-RS-2024-00436857"],"award-info":[{"award-number":["IITP-2026-RS-2024-00436857"]}]},{"name":"National Research Foundation of Korea (NRF)","award":["RS-2025-25435830"],"award-info":[{"award-number":["RS-2025-25435830"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,2,22]]},"DOI":"10.1145\/3773966.3779395","type":"proceedings-article","created":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T17:50:01Z","timestamp":1771264201000},"page":"1155-1159","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Metadata Meets LLMs: Constructing Knowledge-Rich Citation Networks with CoT-Enhanced Representations"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8071-7340","authenticated-orcid":false,"given":"Soohwan","family":"Jeong","sequence":"first","affiliation":[{"name":"Institute for Advanced Engineering, Yongin, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-2909-150X","authenticated-orcid":false,"given":"Mingyu","family":"Choi","sequence":"additional","affiliation":[{"name":"Chungnam National University, Daejeon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3960-2311","authenticated-orcid":false,"given":"Joon-Young","family":"Kim","sequence":"additional","affiliation":[{"name":"Institute for Advanced Engineering, Yongin, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5596-4972","authenticated-orcid":false,"given":"Susik","family":"Yoon","sequence":"additional","affiliation":[{"name":"Korea University, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5924-3398","authenticated-orcid":false,"given":"Sungsu","family":"Lim","sequence":"additional","affiliation":[{"name":"Chungnam National University, Daejeon, Republic of Korea"}]}],"member":"320","published-online":{"date-parts":[[2026,2,21]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"EMNLP Workshop (GEM). 181\u2013191","author":"Abdullin Yelaman","year":"2023","unstructured":"Yelaman Abdullin, Diego Molla-Aliod, Bahadorreza Ofoghi, John Yearwood, and Qingyang Li. 2023. Synthetic Dialogue Dataset Generation using LLM Agents. In EMNLP Workshop (GEM). 181\u2013191."},{"key":"e_1_3_2_1_2_1","first-page":"23716","article-title":"Flamingo: a Visual Language Model for Few-Shot Learning","author":"Alayrac Jean-Baptiste","year":"2022","unstructured":"Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al., 2022. Flamingo: a Visual Language Model for Few-Shot Learning. In NeurIPS. 23716-23736.","journal-title":"NeurIPS."},{"key":"e_1_3_2_1_3_1","first-page":"889","article-title":"Incorporating Structured Sentences with Time-enhanced BERT for Fully-inductive Temporal Relation Prediction","author":"Chen Zhongwu","year":"2023","unstructured":"Zhongwu Chen, Chengjin Xu, Fenglong Su, Zhen Huang, and Yong Dou. 2023. Incorporating Structured Sentences with Time-enhanced BERT for Fully-inductive Temporal Relation Prediction. In SIGIR. 889-899.","journal-title":"SIGIR."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/2005\/09\/P09008"},{"key":"e_1_3_2_1_5_1","first-page":"4171","article-title":"BERT","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT. 4171-4186.","journal-title":"Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT."},{"key":"e_1_3_2_1_6_1","first-page":"2331","article-title":"MAGNN","author":"Fu Xinyu","year":"2020","unstructured":"Xinyu Fu, Jiani Zhang, Ziqiao Meng, and Irwin King. 2020. MAGNN: Metapath Aggregated Graph Neural Network for Heterogeneous Graph Embedding. In WWW. 2331-2341.","journal-title":"In WWW."},{"key":"e_1_3_2_1_7_1","volume-title":"Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System. arXiv preprint arXiv:2303.14524","author":"Gao Yunfan","year":"2023","unstructured":"Yunfan Gao, Tao Sheng, Youlin Xiang, Yun Xiong, Haofen Wang, and Jiawei Zhang. 2023. Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System. arXiv preprint arXiv:2303.14524 (2023)."},{"key":"e_1_3_2_1_8_1","volume-title":"COLING Workshop (TextGraphs). 44-53","author":"Gokhan Tuba","year":"2022","unstructured":"Tuba Gokhan, Phillip Smith, and Mark Lee. 2022. GUSUM: Graph-Based Unsupervised Summarization using Sentence Features Scoring and Sentence-BERT. In COLING Workshop (TextGraphs). 44-53."},{"key":"e_1_3_2_1_9_1","unstructured":"Aaron Grattafiori Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Alex Vaughan et al. 2024. The Llama 3 Herd of Models. arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_1_10_1","unstructured":"Pengcheng He Xiaodong Liu Jianfeng Gao and Weizhu Chen. 2020. DeBERTa: Decoding-enhanced BERT with Disentangled Attention. In ICLR."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3701551.3703546"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF01908075"},{"key":"e_1_3_2_1_13_1","volume-title":"A Comprehensive Survey on Process-Oriented Automatic Text Summarization with Exploration of LLM-Based Methods. arXiv preprint arXiv:2403.02901","author":"Jin Hanlei","year":"2024","unstructured":"Hanlei Jin, Yang Zhang, Dan Meng, Jun Wang, and Jinghua Tan. 2024. A Comprehensive Survey on Process-Oriented Automatic Text Summarization with Exploration of LLM-Based Methods. arXiv preprint arXiv:2403.02901 (2024)."},{"key":"e_1_3_2_1_14_1","first-page":"310","article-title":"Building Knowledge Graph using Pre-trained Language Model for Learning Entity-aware Relationships","author":"Kumar Abhijeet","year":"2020","unstructured":"Abhijeet Kumar, Abhishek Pandey, Rohit Gadia, and Mridul Mishra. 2020. Building Knowledge Graph using Pre-trained Language Model for Learning Entity-aware Relationships. In GUCON. 310-315.","journal-title":"GUCON."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Irene Li Aosong Feng Hao Wu Tianxiao Li Toyotaro Suzumura and Ruihai Dong. 2022. LiGCN: Label-interpretable Graph Convolutional Networks for Multi-label Text Classification. In DLG4NLP. 60-70.","DOI":"10.18653\/v1\/2022.dlg4nlp-1.7"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26537"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i5.16544"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29823"},{"key":"e_1_3_2_1_19_1","first-page":"1456","article-title":"BertGCN: Transductive Text Classification by Combining GNN and BERT","volume":"2021","author":"Lin Yuxiao","year":"2021","unstructured":"Yuxiao Lin, Yuxian Meng, Xiaofei Sun, Qinghong Han, Kun Kuang, Jiwei Li, and Fei Wu. 2021. BertGCN: Transductive Text Classification by Combining GNN and BERT. In Findings of ACL-IJCNLP 2021. 1456-1462.","journal-title":"Findings of ACL-IJCNLP"},{"key":"e_1_3_2_1_20_1","volume-title":"RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.21105\/joss.00205"},{"key":"e_1_3_2_1_22_1","first-page":"1","article-title":"A BERT-based Heterogeneous Graph Convolution Approach for Mining Organization-Related Topics","author":"Qian Haoda","year":"2022","unstructured":"Haoda Qian, Minjie Yuan, Qiudan Li, and Daniel Zeng. 2022. A BERT-based Heterogeneous Graph Convolution Approach for Mining Organization-Related Topics. In IJCNN. 1-8.","journal-title":"IJCNN."},{"key":"e_1_3_2_1_23_1","first-page":"1174","author":"Qiao Wei","year":"2024","unstructured":"Wei Qiao, Tushar Dogra, Otilia Stretcu, Yu-Han Lyu, Tiantian Fang, Dongjin Kwon, Chun-Ta Lu, Enming Luo, Yuan Wang, Chih-Chun Chia, et al., 2024. Scaling Up LLM Reviews for Google Ads Content Moderation. In WSDM. 1174-1175.","journal-title":"In WSDM."},{"key":"e_1_3_2_1_24_1","first-page":"4320","article-title":"U-BERT","author":"Qiu Zhaopeng","year":"2021","unstructured":"Zhaopeng Qiu, Xian Wu, Jingyue Gao, and Wei Fan. 2021. U-BERT: Pre-training User Representations for Improved Recommendation. In AAAI. 4320-4327.","journal-title":"Pre-training User Representations for Improved Recommendation. In AAAI."},{"key":"e_1_3_2_1_25_1","first-page":"3982","article-title":"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks","author":"Reimers Nils","year":"2019","unstructured":"Nils Reimers and Iryna Gurevych. 2019. Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks. In EMNLP-IJCNLP. 3982-3992.","journal-title":"EMNLP-IJCNLP."},{"key":"e_1_3_2_1_26_1","first-page":"2842","article-title":"HiGPT","author":"Tang Jiabin","year":"2024","unstructured":"Jiabin Tang, Yuhao Yang, Wei Wei, Lei Shi, Long Xia, Dawei Yin, and Chao Huang. 2024b. HiGPT: Heterogeneous Graph Language Model. In KDD. 2842-2853.","journal-title":"Heterogeneous Graph Language Model. In KDD."},{"key":"e_1_3_2_1_27_1","first-page":"990","article-title":"ArnetMiner","author":"Tang Jie","year":"2008","unstructured":"Jie Tang, Jing Zhang, Limin Yao, Juanzi Li, Li Zhang, and Zhong Su. 2008. ArnetMiner: Extraction and Mining of Academic Social Networks. In KDD. 990-998.","journal-title":"Extraction and Mining of Academic Social Networks. In KDD."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3624725"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-023-02448-8"},{"key":"e_1_3_2_1_30_1","volume-title":"Vinija Jain, Anku Rani, Vipula Rawte, Aman Chadha, and Amitava Das.","author":"Towhidul Islam Tonmoy S.M","year":"2024","unstructured":"S.M Towhidul Islam Tonmoy, S M Mehedi Zaman, Vinija Jain, Anku Rani, Vipula Rawte, Aman Chadha, and Amitava Das. 2024. A Comprehensive Survey of Hallucination Mitigation Techniques in Large Language Models. arXiv preprint arXiv:2401.01313 (2024)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.15446\/dyna.v90n230.111700"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.112887"},{"key":"e_1_3_2_1_33_1","first-page":"872","author":"Wang Jianling","year":"2024","unstructured":"Jianling Wang, Haokai Lu, Yifan Liu, He Ma, Yueqi Wang, Yang Gu, Shuzhou Zhang, Ningren Han, Shuchao Bi, Lexi Baugher, et al., 2024. LLMs for User Interest Exploration in Large-scale Recommendation Systems. In RecSys. 872-877.","journal-title":"In RecSys."},{"key":"e_1_3_2_1_34_1","first-page":"2022","article-title":"Heterogeneous Graph Attention Network","author":"Wang Xiao","year":"2019","unstructured":"Xiao Wang, Houye Ji, Chuan Shi, Bai Wang, Yanfang Ye, Peng Cui, and Philip S. Yu. 2019. Heterogeneous Graph Attention Network. In WWW. 2022-2032.","journal-title":"WWW."},{"key":"e_1_3_2_1_35_1","first-page":"24824","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al., 2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. In NeurIPS. 24824-24837.","journal-title":"Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. In NeurIPS."},{"key":"e_1_3_2_1_36_1","volume-title":"BloombergGPT: A Large Language Model for Finance. arXiv preprint arXiv:2303.17564","author":"Wu Shijie","year":"2023","unstructured":"Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David Rosenberg, and Gideon Mann. 2023. BloombergGPT: A Large Language Model for Finance. arXiv preprint arXiv:2303.17564 (2023)."},{"key":"e_1_3_2_1_37_1","first-page":"1509","article-title":"Unify Graph Learning with Text","author":"Wu Songhao","year":"2024","unstructured":"Songhao Wu, Quan Tu, Hong Liu, Jia Xu, Zhongyi Liu, Guannan Zhang, Ran Wang, Xiuying Chen, and Rui Yan. 2024. Unify Graph Learning with Text: Unleashing LLM Potentials for Session Search. In WWW. 1509-1518.","journal-title":"Unleashing LLM Potentials for Session Search. In WWW."},{"key":"e_1_3_2_1_38_1","first-page":"466","article-title":"Label-Specific Document Representation for Multi-Label Text Classification","author":"Xiao Lin","year":"2019","unstructured":"Lin Xiao, Xin Huang, Boli Chen, and Liping Jing. 2019. Label-Specific Document Representation for Multi-Label Text Classification. In EMNLP-IJCNLP. 466-475.","journal-title":"EMNLP-IJCNLP."},{"key":"e_1_3_2_1_39_1","first-page":"11960","article-title":"Graph Transformer Networks","author":"Yun Seongjun","year":"2019","unstructured":"Seongjun Yun, Minbyul Jeong, Raehyun Kim, Jaewoo Kang, and Hyunwoo J. Kim. 2019. Graph Transformer Networks. In NeurIPS. 11960-11970.","journal-title":"NeurIPS."},{"key":"e_1_3_2_1_40_1","first-page":"41092","article-title":"Prompting Large Language Model for Machine Translation: A Case Study","author":"Zhang Biao","year":"2023","unstructured":"Biao Zhang, Barry Haddow, and Alexandra Birch. 2023. Prompting Large Language Model for Machine Translation: A Case Study. In ICML. 41092-41110.","journal-title":"ICML."},{"key":"e_1_3_2_1_41_1","first-page":"259","article-title":"Pretrain-KGE: Learning Knowledge Representation from Pretrained Language Models","author":"Zhang Zhiyuan","year":"2020","unstructured":"Zhiyuan Zhang, Xiaoqian Liu, Yi Zhang, Qi Su, Xu Sun, and Bin He. 2020. Pretrain-KGE: Learning Knowledge Representation from Pretrained Language Models. In Findings of EMNLP. 259-266.","journal-title":"Findings of EMNLP."},{"key":"e_1_3_2_1_42_1","first-page":"50117","article-title":"ToolQA","author":"Zhuang Yuchen","year":"2023","unstructured":"Yuchen Zhuang, Yue Yu, Kuan Wang, Haotian Sun, and Chao Zhang. 2023. ToolQA: A Dataset for LLM Question Answering with External Tools. In NeurIPS. 50117-50143.","journal-title":"In NeurIPS."}],"event":{"name":"WSDM '26: The Nineteenth ACM International Conference on Web Search and Data Mining","location":"Boise ID USA","sponsor":["SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web","SIGIR ACM Special Interest Group on Information Retrieval","SIGMOD ACM Special Interest Group on Management of Data"]},"container-title":["Proceedings of the Nineteenth ACM International Conference on Web Search and Data Mining"],"original-title":[],"deposited":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T17:54:44Z","timestamp":1771264484000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3773966.3779395"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,21]]},"references-count":42,"alternative-id":["10.1145\/3773966.3779395","10.1145\/3773966"],"URL":"https:\/\/doi.org\/10.1145\/3773966.3779395","relation":{},"subject":[],"published":{"date-parts":[[2026,2,21]]},"assertion":[{"value":"2026-02-21","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}