{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T18:46:14Z","timestamp":1774291574227,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":50,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,9,22]]},"DOI":"10.1145\/3705328.3748085","type":"proceedings-article","created":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T10:46:13Z","timestamp":1757155573000},"page":"145-154","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Heterogeneous User Modeling for LLM-based Recommendation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-7908-4256","authenticated-orcid":false,"given":"Honghui","family":"Bao","sequence":"first","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5199-1428","authenticated-orcid":false,"given":"Wenjie","family":"Wang","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6931-3182","authenticated-orcid":false,"given":"Xinyu","family":"Lin","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6776-2040","authenticated-orcid":false,"given":"Fengbin","family":"Zhu","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0932-8910","authenticated-orcid":false,"given":"Teng","family":"Sun","sequence":"additional","affiliation":[{"name":"Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5828-9842","authenticated-orcid":false,"given":"Fuli","family":"Feng","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6097-7807","authenticated-orcid":false,"given":"Tat-Seng","family":"Chua","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2025,9,7]]},"reference":[{"key":"e_1_3_3_1_2_2","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608857"},{"key":"e_1_3_3_1_3_2","unstructured":"Tom\u00a0B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel\u00a0M. Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners."},{"key":"e_1_3_3_1_4_2","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557262"},{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","DOI":"10.1145\/3539597.3570366"},{"key":"e_1_3_3_1_6_2","unstructured":"Hyung\u00a0Won Chung Le Hou Shayne Longpre Barret Zoph Yi Tay William Fedus Yunxuan Li Xuezhi Wang Mostafa Dehghani Siddhartha Brahma et\u00a0al. 2024. Scaling instruction-finetuned language models. 
JMLR (2024)."},{"key":"e_1_3_3_1_7_2","volume-title":"ICLR","author":"Del\u00e9tang Gr\u00e9goire","year":"2024","unstructured":"Gr\u00e9goire Del\u00e9tang, Anian Ruoss, Paul-Ambroise Duquenne, Elliot Catt, Tim Genewein, Christopher Mattern, Jordi Grau-Moya, Li\u00a0Kevin Wenliang, Matthew Aitchison, Laurent Orseau, et\u00a0al. 2024. Language modeling is compression. In ICLR."},{"key":"e_1_3_3_1_8_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i22.34547"},{"key":"e_1_3_3_1_9_2","unstructured":"Jacob Devlin Ming-Wei Chang Kenton Lee and Kristina Toutanova. 2018. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1810.04805 (2018)."},{"key":"e_1_3_3_1_10_2","unstructured":"Zichuan Fu Xiangyang Li Chuhan Wu Yichao Wang Kuicai Dong Xiangyu Zhao Mengchen Zhao Huifeng Guo and Ruiming Tang. 2024. A unified framework for multi-domain ctr prediction via large language models. TOIS (2024)."},{"key":"e_1_3_3_1_11_2","unstructured":"Yupeng Hou Jiacheng Li Zhankui He An Yan Xiusi Chen and Julian McAuley. 2024. Bridging Language and Items for Retrieval and Recommendation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2403.03952 (2024)."},{"key":"e_1_3_3_1_12_2","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539381"},{"key":"e_1_3_3_1_13_2","unstructured":"Edward\u00a0J Hu Yelong Shen Phillip Wallis Zeyuan Allen-Zhu Yuanzhi Li Shean Wang Lu Wang and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv:https:\/\/arXiv.org\/abs\/2106.09685 (2021)."},{"key":"e_1_3_3_1_14_2","volume-title":"ICLR","author":"Iacob Alex","year":"2025","unstructured":"Alex Iacob, Lorenzo Sani, Meghdad Kurmanji, William\u00a0F. Shen, Xinchi Qiu, Dongqi Cai, Yan Gao, and Nicholas\u00a0D. Lane. 2025. DEPT: Decoupled Embeddings for Pre-training Language Models. In ICLR."},{"key":"e_1_3_3_1_15_2","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2018.00035"},{"key":"e_1_3_3_1_16_2","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599519"},{"key":"e_1_3_3_1_17_2","volume-title":"WWW","author":"Li Xinhang","year":"2024","unstructured":"Xinhang Li, Chong Chen, Xiangyu Zhao, Yong Zhang, and Chunxiao Xing. 2024. E4SRec: An elegant effective efficient extensible solution of large language models for sequential recommendation. In WWW. ACM."},{"key":"e_1_3_3_1_18_2","unstructured":"Yongqi Li Xinyu Lin Wenjie Wang Fuli Feng Liang Pang Wenjie Li Liqiang Nie Xiangnan He and Tat-Seng Chua. 2024. A Survey of Generative Search and Recommendation in the Era of Large Language Models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2404.16924 (2024)."},{"key":"e_1_3_3_1_19_2","volume-title":"WWW","author":"Lin Jianghao","year":"2024","unstructured":"Jianghao Lin, Bo Chen, Hangyu Wang, Yunjia Xi, Yanru Qu, Xinyi Dai, Kangning Zhang, Ruiming Tang, Yong Yu, and Weinan Zhang. 2024. ClickPrompt: CTR Models are Strong Prompt Generators for Adapting Language Models to CTR Prediction. In WWW. ACM."},{"key":"e_1_3_3_1_20_2","volume-title":"WWW","author":"Lin Jianghao","year":"2024","unstructured":"Jianghao Lin, Rong Shan, Chenxu Zhu, Kounianhua Du, Bo Chen, Shigang Quan, Ruiming Tang, Yong Yu, and Weinan Zhang. 2024. Rella: Retrieval-enhanced large language models for lifelong sequential behavior comprehension in recommendation. In WWW. 
ACM."},{"key":"e_1_3_3_1_21_2","doi-asserted-by":"publisher","DOI":"10.1145\/3726302.3730053"},{"key":"e_1_3_3_1_22_2","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671884"},{"key":"e_1_3_3_1_23_2","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657807"},{"key":"e_1_3_3_1_24_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i8.28721"},{"key":"e_1_3_3_1_25_2","doi-asserted-by":"publisher","DOI":"10.1145\/3616855.3635845"},{"key":"e_1_3_3_1_26_2","unstructured":"Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. ICLR."},{"key":"e_1_3_3_1_27_2","volume-title":"EMNLP","author":"Ma Qiyao","year":"2025","unstructured":"Qiyao Ma, Xubin Ren, and Chao Huang. 2025. XRec: Large Language Models for Explainable Recommendation. In EMNLP."},{"key":"e_1_3_3_1_28_2","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/343"},{"key":"e_1_3_3_1_29_2","unstructured":"Arvind Neelakantan Tao Xu Raul Puri Alec Radford Jesse\u00a0Michael Han Jerry Tworek Qiming Yuan Nikolas Tezak Jong\u00a0Wook Kim Chris Hallacy et\u00a0al. 2022. Text and code embeddings by contrastive pre-training. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2201.10005 (2022)."},{"key":"e_1_3_3_1_30_2","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657710"},{"key":"e_1_3_3_1_31_2","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608844"},{"key":"e_1_3_3_1_32_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i5.16557"},{"key":"e_1_3_3_1_33_2","volume-title":"WWW","author":"Ren Xubin","year":"2024","unstructured":"Xubin Ren, Wei Wei, Lianghao Xia, Lixin Su, Suqi Cheng, Junfeng Wang, Dawei Yin, and Chao Huang. 2024. Representation Learning with Large Language Models for Recommendation. In WWW. ACM."},{"key":"e_1_3_3_1_34_2","volume-title":"ICLR","author":"Sanh Victor","year":"2022","unstructured":"Victor Sanh, Albert Webson, Colin Raffel, Stephen\u00a0H Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Teven\u00a0Le Scao, Arun Raja, et\u00a0al. 2022. Multitask prompted training enables zero-shot task generalization. In ICLR."},{"key":"e_1_3_3_1_35_2","doi-asserted-by":"publisher","DOI":"10.1145\/3459637.3481941"},{"key":"e_1_3_3_1_36_2","doi-asserted-by":"publisher","DOI":"10.1145\/3459637.3481941"},{"key":"e_1_3_3_1_37_2","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657821"},{"key":"e_1_3_3_1_38_2","unstructured":"Zuoli Tang Zhaoxin Huan Zihao Li Xiaolu Zhang Jun Hu Chilin Fu Jun Zhou and Chenliang Li. 2023. One model for all: Large language models are domain-agnostic recommendation systems. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2310.14304 (2023)."},{"key":"e_1_3_3_1_39_2","unstructured":"Qwen Team. 2024. Qwen2.5: A Party of Foundation Models. https:\/\/qwenlm.github.io\/blog\/qwen2.5\/"},{"key":"e_1_3_3_1_40_2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-88192-6_31"},{"key":"e_1_3_3_1_41_2","doi-asserted-by":"publisher","DOI":"10.1145\/3627673.3679569"},{"key":"e_1_3_3_1_42_2","doi-asserted-by":"publisher","DOI":"10.1145\/3616855.3635853"},{"key":"e_1_3_3_1_43_2","volume-title":"WWW","author":"Wen Hongyi","year":"2022","unstructured":"Hongyi Wen, Xinyang Yi, Tiansheng Yao, Jiaxi Tang, Lichan Hong, and Ed\u00a0H. Chi. 2022. Distributionally-robust Recommendations for Improving Worst-case User Experience. In WWW. ACM."},{"key":"e_1_3_3_1_44_2","unstructured":"Yunjia Xi Weiwen Liu Jianghao Lin Jieming Zhu Bo Chen Ruiming Tang Weinan Zhang Rui Zhang and Yong Yu. 2023. 
Towards open-world recommendation with knowledge augmentation from large language models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2306.10933 (2023)."},{"key":"e_1_3_3_1_45_2","volume-title":"WWW","author":"Xu Wujiang","year":"2024","unstructured":"Wujiang Xu, Qitian Wu, Runzhong Wang, Mingming Ha, Qiongxu Ma, Linxun Chen, Bing Han, and Junchi Yan. 2024. Rethinking Cross-Domain Sequential Recommendation under Open-World Assumptions. In WWW. ACM."},{"key":"e_1_3_3_1_46_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1201"},{"key":"e_1_3_3_1_47_2","unstructured":"Susan Zhang Stephen Roller Naman Goyal Mikel Artetxe Moya Chen Shuohui Chen Christopher Dewan Mona Diab Xian Li Xi\u00a0Victoria Lin et\u00a0al. 2022. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2205.01068 (2022)."},{"key":"e_1_3_3_1_48_2","unstructured":"Yang Zhang Fuli Feng Jizhi Zhang Keqin Bao Qifan Wang and Xiangnan He. 2023. Collm: Integrating collaborative embeddings into large language models for recommendation. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2310.19488 (2023)."},{"key":"e_1_3_3_1_49_2","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3615492"},{"key":"e_1_3_3_1_50_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-naacl.155"},{"key":"e_1_3_3_1_51_2","doi-asserted-by":"publisher","DOI":"10.1109\/ICDE60146.2024.00118"}],"event":{"name":"RecSys '25: Nineteenth ACM Conference on Recommender Systems","location":"Prague Czech Republic","acronym":"RecSys '25","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction","SIGAI ACM Special Interest Group on Artificial Intelligence","SIGIR ACM Special Interest Group on Information Retrieval","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the Nineteenth ACM Conference on Recommender Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3705328.3748085","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T11:48:35Z","timestamp":1757159315000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3705328.3748085"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,7]]},"references-count":50,"alternative-id":["10.1145\/3705328.3748085","10.1145\/3705328"],"URL":"https:\/\/doi.org\/10.1145\/3705328.3748085","relation":{},"subject":[],"published":{"date-parts":[[2025,9,7]]},"assertion":[{"value":"2025-09-07","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
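
The record above is the standard Crossref REST API response shape for a single work: the payload sits under "message", with the title, the author list, and a per-entry "reference" array as shown. As a minimal sketch of how such a record could be fetched and summarized (assuming only the public endpoint pattern https://api.crossref.org/works/{DOI} and the field names visible in the JSON above; the contact address passed via "mailto" is a placeholder following Crossref's polite-pool convention):

import requests  # third-party HTTP client

DOI = "10.1145/3705328.3748085"

# Fetch the work record; Crossref wraps the payload as
# {"status": "ok", "message-type": "work", "message": {...}}.
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.org"},  # placeholder contact address
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]

# Field names match the record shown above: "title" is a list, each
# author object carries "given"/"family", and "references-count" totals
# the entries of the "reference" array.
title = work["title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)
cited_dois = [r["DOI"] for r in work.get("reference", []) if "DOI" in r]

print(title)
print(authors)
print(f'{work["references-count"]} references, {len(cited_dois)} with DOIs')

Run against this DOI, the script would print the paper title, the seven authors, and "50 references" with the subset of entries that carry a DOI (the remaining "unstructured" entries are plain citation strings, as in the record above).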