{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T11:48:00Z","timestamp":1774352880837,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":41,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100006374","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62272274,T2293773,62372275,62202271,62102234,62072279"],"award-info":[{"award-number":["62272274,T2293773,62372275,62202271,62102234,62072279"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006374","name":"Natural Science Foundation of Shandong Province","doi-asserted-by":"publisher","award":["ZR2023QF159"],"award-info":[{"award-number":["ZR2023QF159"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Key R&D Program of China","award":["No.2022YFC3303004"],"award-info":[{"award-number":["No.2022YFC3303004"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1145\/3589334.3645448","type":"proceedings-article","created":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T07:08:13Z","timestamp":1715152093000},"page":"3444-3453","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":21,"title":["Generative News Recommendation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1301-3700","authenticated-orcid":false,"given":"Shen","family":"Gao","sequence":"first","affiliation":[{"name":"University of Electronic Science and Technology of China, Chengdu, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-9248-5642","authenticated-orcid":false,"given":"Jiabao","family":"Fang","sequence":"additional","affiliation":[{"name":"Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-8806-247X","authenticated-orcid":false,"given":"Quan","family":"Tu","sequence":"additional","affiliation":[{"name":"Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-3323-1788","authenticated-orcid":false,"given":"Zhitao","family":"Yao","sequence":"additional","affiliation":[{"name":"Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4592-4074","authenticated-orcid":false,"given":"Zhumin","family":"Chen","sequence":"additional","affiliation":[{"name":"Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2964-6422","authenticated-orcid":false,"given":"Pengjie","family":"Ren","sequence":"additional","affiliation":[{"name":"Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9076-6565","authenticated-orcid":false,"given":"Zhaochun","family":"Ren","sequence":"additional","affiliation":[{"name":"Leiden University, Leiden, Netherlands"}]}],"member":"320","published-online":{"date-parts":[[2024,5,13]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19--1033"},{"key":"e_1_3_2_2_2_1","volume-title":"2023 a. A bi-step grounding paradigm for large language models in recommendation systems. arXiv preprint arXiv:2308.08434","author":"Bao Keqin","year":"2023","unstructured":"Keqin Bao, Jizhi Zhang, Wenjie Wang, Yang Zhang, Zhengyi Yang, Yancheng Luo, Fuli Feng, Xiangnaan He, and Qi Tian. 2023 a. A bi-step grounding paradigm for large language models in recommendation systems. arXiv preprint arXiv:2308.08434 (2023)."},{"key":"e_1_3_2_2_3_1","volume-title":"2023 b. Tallrec: An effective and efficient tuning framework to align large language model with recommendation. 
arXiv preprint arXiv:2305.00447","author":"Bao Keqin","year":"2023","unstructured":"Keqin Bao, Jizhi Zhang, Yang Zhang, Wenjie Wang, Fuli Feng, and Xiangnan He. 2023 b. Tallrec: An effective and efficient tuning framework to align large language model with recommendation. arXiv preprint arXiv:2305.00447 (2023)."},{"key":"e_1_3_2_2_4_1","volume-title":"Uncovering ChatGPT's Capabilities in Recommender Systems. arXiv preprint arXiv:2305.02182","author":"Dai Sunhao","year":"2023","unstructured":"Sunhao Dai, Ninglu Shao, Haiyuan Zhao, Weijie Yu, Zihua Si, Chen Xu, Zhongxiang Sun, Xiao Zhang, and Jun Xu. 2023. Uncovering ChatGPT's Capabilities in Recommender Systems. arXiv preprint arXiv:2305.02182 (2023)."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/1242572.1242610"},{"key":"e_1_3_2_2_6_1","volume-title":"Chat-rec: Towards interactive and explainable llms-augmented recommender system. arXiv preprint arXiv:2303.14524","author":"Gao Yunfan","year":"2023","unstructured":"Yunfan Gao, Tao Sheng, Youlin Xiang, Yun Xiong, Haofen Wang, and Jiawei Zhang. 2023. Chat-rec: Towards interactive and explainable llms-augmented recommender system. arXiv preprint arXiv:2303.14524 (2023)."},{"key":"e_1_3_2_2_7_1","volume-title":"International Conference on Neural Information Processing. Springer, 341--356","author":"Guo Chunxi","year":"2023","unstructured":"Chunxi Guo, Zhiliang Tian, Jintao Tang, Shasha Li, Zhihua Wen, Kaixuan Wang, and Ting Wang. 2023. Retrieval-augmented gpt-3.5-based text-to-sql framework with sample-aware prompting and dynamic revision chain. In International Conference on Neural Information Processing. Springer, 341--356."},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3610639"},{"key":"e_1_3_2_2_9_1","volume-title":"Large language models are zero-shot rankers for recommender systems. 
arXiv preprint arXiv:2305.08845","author":"Hou Yupeng","year":"2023","unstructured":"Yupeng Hou, Junjie Zhang, Zihan Lin, Hongyu Lu, Ruobing Xie, Julian McAuley, and Wayne Xin Zhao. 2023. Large language models are zero-shot rankers for recommender systems. arXiv preprint arXiv:2305.08845 (2023)."},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2944927"},{"key":"e_1_3_2_2_11_1","volume-title":"2023 a. Exploring Fine-tuning ChatGPT for News Recommendation. arXiv preprint arXiv:2311.05850","author":"Li Xinyi","year":"2023","unstructured":"Xinyi Li, Yongfeng Zhang, and Edward C Malthouse. 2023 a. Exploring Fine-tuning ChatGPT for News Recommendation. arXiv preprint arXiv:2311.05850 (2023)."},{"key":"e_1_3_2_2_12_1","volume-title":"Provider Fairness, Fake News. arXiv preprint arXiv:2306.10702","author":"Li Xinyi","year":"2023","unstructured":"Xinyi Li, Yongfeng Zhang, and Edward C Malthouse. 2023 b. A Preliminary Study of ChatGPT on News Recommendation: Personalization, Provider Fairness, Fake News. arXiv preprint arXiv:2306.10702 (2023)."},{"key":"e_1_3_2_2_13_1","volume-title":"LLaRA: Aligning Large Language Models with Sequential Recommenders. arXiv preprint arXiv:2312.02445","author":"Liao Jiayi","year":"2023","unstructured":"Jiayi Liao, Sihang Li, Zhengyi Yang, Jiancan Wu, Yancheng Yuan, Xiang Wang, and Xiangnan He. 2023. LLaRA: Aligning Large Language Models with Sequential Recommenders. arXiv preprint arXiv:2312.02445 (2023)."},{"key":"e_1_3_2_2_14_1","volume-title":"2023 a. ONCE: Boosting Content-based Recommendation with Both Open- and Closed-source Large Language Models. arXiv:2305.06566 (Aug","author":"Liu Qijiong","year":"2023","unstructured":"Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2023 a. ONCE: Boosting Content-based Recommendation with Both Open- and Closed-source Large Language Models. arXiv:2305.06566 (Aug. 2023). 
http:\/\/arxiv.org\/abs\/2305.06566 arXiv:2305.06566 [cs]."},{"key":"e_1_3_2_2_15_1","volume-title":"2023 b. ONCE: Boosting Content-based Recommendation with Both Open-and Closed-source Large Language Models. arXiv preprint arXiv:2305.06566","author":"Liu Qijiong","year":"2023","unstructured":"Qijiong Liu, Nuo Chen, Tetsuya Sakai, and Xiao-Ming Wu. 2023 b. ONCE: Boosting Content-based Recommendation with Both Open-and Closed-source Large Language Models. arXiv preprint arXiv:2305.06566 (2023)."},{"key":"e_1_3_2_2_16_1","unstructured":"Zheheng Luo Qianqian Xie and Sophia Ananiadou. 2023. Chatgpt as a factual inconsistency evaluator for text summarization."},{"key":"e_1_3_2_2_17_1","volume-title":"LLM-Rec: Personalized Recommendation via Prompting Large Language Models. arXiv preprint arXiv:2307.15780","author":"Lyu Hanjia","year":"2023","unstructured":"Hanjia Lyu, Song Jiang, Hanqing Zeng, Yinglong Xia, and Jiebo Luo. 2023. LLM-Rec: Personalized Recommendation via Prompting Large Language Models. arXiv preprint arXiv:2307.15780 (2023)."},{"key":"e_1_3_2_2_18_1","volume-title":"Large Language Model Augmented Narrative Driven Recommendations. arXiv preprint arXiv:2306.02250","author":"Mysore Sheshera","year":"2023","unstructured":"Sheshera Mysore, Andrew McCallum, and Hamed Zamani. 2023. Large Language Model Augmented Narrative Driven Recommendations. arXiv preprint arXiv:2306.02250 (2023)."},{"key":"e_1_3_2_2_19_1","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang Long","year":"2022","unstructured":"Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems , Vol. 
35 (2022), 27730--27744.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_20_1","volume-title":"Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084","author":"Reimers Nils","year":"2019","unstructured":"Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019)."},{"key":"e_1_3_2_2_21_1","volume-title":"Lkpnr: Llm and kg for personalized news recommendation framework. arXiv preprint arXiv:2308.12028","author":"Runfeng Xie","year":"2023","unstructured":"Xie Runfeng, Cui Xiangyang, Yan Zhou, Wang Xin, Xuan Zhanwei, Zhang Kai, et al. 2023. Lkpnr: Llm and kg for personalized news recommendation framework. arXiv preprint arXiv:2308.12028 (2023)."},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608845"},{"key":"e_1_3_2_2_23_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26595"},{"key":"e_1_3_2_2_24_1","volume-title":"2023 b. From Indeterminacy to Determinacy: Augmenting Logical Reasoning Capabilities with Large Language Models. arXiv preprint arXiv:2310.18659","author":"Sun Hongda","year":"2023","unstructured":"Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan. 2023 b. From Indeterminacy to Determinacy: Augmenting Logical Reasoning Capabilities with Large Language Models. arXiv preprint arXiv:2310.18659 (2023)."},{"key":"e_1_3_2_2_25_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. 
arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186175"},{"key":"e_1_3_2_2_27_1","volume-title":"Zero-Shot Next-Item Recommendation using Large Pretrained Language Models. arXiv preprint arXiv:2304.03153","author":"Wang Lei","year":"2023","unstructured":"Lei Wang and Ee-Peng Lim. 2023. Zero-Shot Next-Item Recommendation using Large Pretrained Language Models. arXiv preprint arXiv:2304.03153 (2023)."},{"key":"e_1_3_2_2_28_1","volume-title":"Wayne Xin Zhao, et al. 2023 d. When large language model based agent meets user behavior analysis: A novel user simulation paradigm. arXiv preprint ArXiv:2306.02552","author":"Wang Lei","year":"2023","unstructured":"Lei Wang, Jingsen Zhang, Hao Yang, Zhiyuan Chen, Jiakai Tang, Zeyu Zhang, Xu Chen, Yankai Lin, Ruihua Song, Wayne Xin Zhao, et al. 2023 d. When large language model based agent meets user behavior analysis: A novel user simulation paradigm. arXiv preprint ArXiv:2306.02552 (2023)."},{"key":"e_1_3_2_2_29_1","volume-title":"2023 b. Generative recommendation: Towards next-generation recommender paradigm. arXiv preprint arXiv:2304.03516","author":"Wang Wenjie","year":"2023","unstructured":"Wenjie Wang, Xinyu Lin, Fuli Feng, Xiangnan He, and Tat-Seng Chua. 2023 b. Generative recommendation: Towards next-generation recommender paradigm. arXiv preprint arXiv:2304.03516 (2023)."},{"key":"e_1_3_2_2_30_1","volume-title":"Jingyuan Wang, and Ji-Rong Wen. 2023 c. Rethinking the Evaluation for Conversational Recommendation in the Era of Large Language Models. arXiv preprint arXiv:2305.13112","author":"Wang Xiaolei","year":"2023","unstructured":"Xiaolei Wang, Xinyu Tang, Wayne Xin Zhao, Jingyuan Wang, and Ji-Rong Wen. 2023 c. Rethinking the Evaluation for Conversational Recommendation in the Era of Large Language Models. arXiv preprint arXiv:2305.13112 (2023)."},{"key":"e_1_3_2_2_31_1","volume-title":"2023 a. 
Enhancing recommender systems with large language model reasoning graphs. arXiv preprint arXiv:2308.10835","author":"Wang Yan","year":"2023","unstructured":"Yan Wang, Zhixuan Chu, Xin Ouyang, Simeng Wang, Hongyan Hao, Yue Shen, Jinjie Gu, Siqiao Xue, James Y Zhang, Qing Cui, et al. 2023 a. Enhancing recommender systems with large language model reasoning graphs. arXiv preprint arXiv:2308.10835 (2023)."},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/536"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19--1671"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1671"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3530257"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3463069"},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.331"},{"key":"e_1_3_2_2_38_1","volume-title":"Going Beyond Local: Global Graph-Enhanced Personalized News Recommendations. arXiv preprint arXiv:2307.06576","author":"Yang Boming","year":"2023","unstructured":"Boming Yang, Dairui Liu, Toyotaro Suzumura, Ruihai Dong, and Irene Li. 2023. Going Beyond Local: Global Graph-Enhanced Personalized News Recommendations. arXiv preprint arXiv:2307.06576 (2023)."},{"key":"e_1_3_2_2_39_1","volume-title":"Leyu Lin, and Ji-Rong Wen.","author":"Zhang Junjie","year":"2023","unstructured":"Junjie Zhang, Ruobing Xie, Yupeng Hou, Wayne Xin Zhao, Leyu Lin, and Ji-Rong Wen. 2023. Recommendation as instruction following: A large language model empowered recommendation approach. arXiv preprint arXiv:2305.07001 (2023)."},{"key":"e_1_3_2_2_40_1","unstructured":"Wayne Xin Zhao Kun Zhou Junyi Li Tianyi Tang Xiaolei Wang Yupeng Hou Yingqian Min Beichen Zhang Junjie Zhang Zican Dong et al. 2023. A survey of large language models. 
arXiv preprint arXiv:2303.18223 (2023)."},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"crossref","unstructured":"Ce Zhou Qian Li Chen Li Jun Yu Yixin Liu Guangjing Wang Kai Zhang Cheng Ji Qiben Yan Lifang He et al. 2023. A comprehensive survey on pretrained foundation models: A history from bert to chatgpt. arXiv preprint arXiv:2302.09419 (2023).","DOI":"10.1007\/s13042-024-02443-6"}],"event":{"name":"WWW '24: The ACM Web Conference 2024","location":"Singapore Singapore","acronym":"WWW '24","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM Web Conference 2024"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589334.3645448","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3589334.3645448","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:31:33Z","timestamp":1755822693000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589334.3645448"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":41,"alternative-id":["10.1145\/3589334.3645448","10.1145\/3589334"],"URL":"https:\/\/doi.org\/10.1145\/3589334.3645448","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]},"assertion":[{"value":"2024-05-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}