{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T13:40:25Z","timestamp":1777297225165,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":62,"publisher":"ACM","funder":[{"name":"Zhejiang Province \u201cJianBingLingYan+X\u201d Research and Development Plan","award":["2025C02020"],"award-info":[{"award-number":["2025C02020"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,4,13]]},"DOI":"10.1145\/3774904.3792387","type":"proceedings-article","created":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T12:38:33Z","timestamp":1777293513000},"page":"6365-6376","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Field Matters: A Lightweight LLM-enhanced Method for CTR Prediction"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-6203-3022","authenticated-orcid":false,"given":"Yu","family":"Cui","sequence":"first","affiliation":[{"name":"State Key Laboratory of Blockchain and Data Security, College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-9265-9431","authenticated-orcid":false,"given":"Feng","family":"Liu","sequence":"additional","affiliation":[{"name":"OPPO Research Institute, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4752-2629","authenticated-orcid":false,"given":"Jiawei","family":"Chen","sequence":"additional","affiliation":[{"name":"The State Key Laboratory of Blockchain and Data Security, College of Computer Science and Technology, Hangzhou High-Tech Zone (Binjiang) Institute of Blockchain and Data Security, Zhejiang University, Hangzhou, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-3180-0668","authenticated-orcid":false,"given":"Xingyu","family":"Lou","sequence":"additional","affiliation":[{"name":"OPPO Research Institute, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4193-7833","authenticated-orcid":false,"given":"Changwang","family":"Zhang","sequence":"additional","affiliation":[{"name":"OPPO Research Institute, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0481-5341","authenticated-orcid":false,"given":"Jun","family":"Wang","sequence":"additional","affiliation":[{"name":"OPPO Research Institute, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-2701-4641","authenticated-orcid":false,"given":"Yuegang","family":"Sun","sequence":"additional","affiliation":[{"name":"Intelligence Indeed, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4111-4189","authenticated-orcid":false,"given":"Xiaohu","family":"Yang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Blockchain and Data Security, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5890-4307","authenticated-orcid":false,"given":"Can","family":"Wang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Blockchain and Data Security, Hangzhou High-Tech Zone (Binjiang) Institute of Blockchain and Data Security, Zhejiang University, Hangzhou, China"}]}],"member":"320","published-online":{"date-parts":[[2026,4,12]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al., 2023. Gpt-4 technical report. 
arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_2_1","volume-title":"A bi-step grounding paradigm for large language models in recommendation systems. arXiv preprint arXiv:2308.08434","author":"Bao Keqin","year":"2023","unstructured":"Keqin Bao, Jizhi Zhang, Wenjie Wang, Yang Zhang, Zhengyi Yang, Yancheng Luo, Chong Chen, Fuli Feng, and Qi Tian. 2023a. A bi-step grounding paradigm for large language models in recommendation systems. arXiv preprint arXiv:2308.08434 (2023)."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608857"},{"key":"e_1_3_2_1_4_1","volume-title":"Hllm: Enhancing sequential recommendations via hierarchical large language models for item and user modeling. arXiv preprint arXiv:2409.12740","author":"Chen Junyi","year":"2024","unstructured":"Junyi Chen, Lu Chi, Bingyue Peng, and Zehuan Yuan. 2024. Hllm: Enhancing sequential recommendations via hierarchical large language models for item and user modeling. arXiv preprint arXiv:2409.12740 (2024)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3522672"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/2988450.2988454"},{"key":"e_1_3_2_1_7_1","volume-title":"HatLLM: Hierarchical Attention Masking for Enhanced Collaborative Modeling in LLM-based Recommendation. arXiv preprint arXiv:2510.10955","author":"Cui Yu","year":"2025","unstructured":"Yu Cui, Feng Liu, Jiawei Chen, Canghong Jin, Xingyu Lou, Changwang Zhang, Jun Wang, Yuegang Sun, and Can Wang. 2025. HatLLM: Hierarchical Attention Masking for Enhanced Collaborative Modeling in LLM-based Recommendation. arXiv preprint arXiv:2510.10955 (2025)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3640457.3688118"},{"key":"e_1_3_2_1_9_1","volume-title":"M6-rec: Generative pretrained language models are open-ended recommender systems. 
arXiv preprint arXiv:2205.08084","author":"Cui Zeyu","year":"2022","unstructured":"Zeyu Cui, Jianxin Ma, Chang Zhou, Jingren Zhou, and Hongxia Yang. 2022. M6-rec: Generative pretrained language models are open-ended recommender systems. arXiv preprint arXiv:2205.08084 (2022)."},{"key":"e_1_3_2_1_10_1","first-page":"4171","volume-title":"Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers). 4171-4186."},{"key":"e_1_3_2_1_11_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_1_12_1","volume-title":"A unified framework for multi-domain ctr prediction via large language models. ACM Transactions on Information Systems","author":"Fu Zichuan","year":"2023","unstructured":"Zichuan Fu, Xiangyang Li, Chuhan Wu, Yichao Wang, Kuicai Dong, Xiangyu Zhao, Mengchen Zhao, Huifeng Guo, and Ruiming Tang. 2023. A unified framework for multi-domain ctr prediction via large language models. ACM Transactions on Information Systems (2023)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657974"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3523227.3546767"},{"key":"e_1_3_2_1_15_1","unstructured":"Huifeng Guo Ruiming Tang Yunming Ye Zhenguo Li and Xiuqiang He. 2017. DeepFM: a factorization-machine based neural network for CTR prediction. 
arXiv preprint arXiv:1703.04247 (2017)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"e_1_3_2_1_17_1","volume-title":"Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685","author":"Hu Edward J","year":"2021","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3298689.3347043"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/2959100.2959134"},{"key":"e_1_3_2_1_20_1","volume-title":"Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980","author":"Kingma Diederik P","year":"2014","unstructured":"Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)."},{"key":"e_1_3_2_1_21_1","volume-title":"DCNv3: Towards Next Generation Deep Cross Network for CTR Prediction. arXiv preprint arXiv:2407.13349","author":"Li Honghao","year":"2024","unstructured":"Honghao Li, Yiwen Zhang, Yi Zhang, Hanwei Li, Lei Sang, and Jieming Zhu. 2024. DCNv3: Towards Next Generation Deep Cross Network for CTR Prediction. arXiv preprint arXiv:2407.13349 (2024)."},{"key":"e_1_3_2_1_22_1","volume-title":"Ctrl: Connect collaborative and language model for ctr prediction. ACM Transactions on Recommender Systems","author":"Li Xiangyang","year":"2023","unstructured":"Xiangyang Li, Bo Chen, Lu Hou, and Ruiming Tang. 2023. Ctrl: Connect collaborative and language model for ctr prediction. 
ACM Transactions on Recommender Systems (2023)."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357951"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645396"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599422"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645467"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3701551.3703579"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313497"},{"key":"e_1_3_2_1_29_1","volume-title":"Ptab: Using the pre-trained language model for modeling tabular data. arXiv preprint arXiv:2209.08060","author":"Liu Guang","year":"2022","unstructured":"Guang Liu, Jie Yang, and Ledell Wu. 2022. Ptab: Using the pre-trained language model for modeling tabular data. arXiv preprint arXiv:2209.08060 (2022)."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/2806416.2806603"},{"key":"e_1_3_2_1_31_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019a. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_1_32_1","volume-title":"Proceedings of the twenty-ninth international conference on international joint conferences on artificial intelligence. 3139-3145","author":"Lu Wantong","year":"2021","unstructured":"Wantong Lu, Yantao Yu, Yongzhe Chang, Zhen Wang, Chenhui Li, and Bo Yuan. 2021. A dual input-aware factorization machine for CTR prediction. In Proceedings of the twenty-ninth international conference on international joint conferences on artificial intelligence. 
3139-3145."},{"key":"e_1_3_2_1_33_1","volume-title":"International conference on Machine learning. PMLR, 23803-23828","author":"Mao Anqi","year":"2023","unstructured":"Anqi Mao, Mehryar Mohri, and Yutao Zhong. 2023a. Cross-entropy loss functions: Theoretical analysis and applications. In International conference on Machine learning. PMLR, 23803-23828."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i4.25577"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186040"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3627673.3680048"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645458"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2010.127"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3357925"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449930"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1145\/3627673.3679558"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2019.102076"},{"key":"e_1_3_2_1_43_1","volume-title":"Contrastive representation distillation. arXiv preprint arXiv:1910.10699","author":"Tian Yonglong","year":"2019","unstructured":"Yonglong Tian, Dilip Krishnan, and Phillip Isola. 2019. Contrastive representation distillation. arXiv preprint arXiv:1910.10699 (2019)."},{"key":"e_1_3_2_1_44_1","unstructured":"Bohao Wang Jiawei Chen Feng Liu Changwang Zhang Jun Wang Canghong Jin Chun Chen and Can Wang. 2026. Does LLM Focus on the Right Words? Mitigating Context Bias in LLM-based Recommenders. 
arXiv:2510.10978 [cs.IR] https:\/\/arxiv.org\/abs\/2510.10978"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3726302.3730041"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3762182","article-title":"Llm4dsr: Leveraging large language model for denoising sequential recommendation","volume":"44","author":"Wang Bohao","year":"2025","unstructured":"Bohao Wang, Feng Liu, Changwang Zhang, Jiawei Chen, Yudi Wu, Sheng Zhou, Xingyu Lou, Jun Wang, Yan Feng, Chun Chen, et al., 2025b. Llm4dsr: Leveraging large language model for denoising sequential recommendation. ACM Transactions on Information Systems, Vol. 44, 1 (2025), 1-32.","journal-title":"ACM Transactions on Information Systems"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531970"},{"key":"e_1_3_2_1_48_1","volume-title":"Improving text embeddings with large language models. arXiv preprint arXiv:2401.00368","author":"Wang Liang","year":"2023","unstructured":"Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2023. Improving text embeddings with large language models. arXiv preprint arXiv:2401.00368 (2023)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3450078"},{"key":"e_1_3_2_1_50_1","volume-title":"CELA: Cost-Efficient Language Model Alignment for CTR Prediction. arXiv preprint arXiv:2405.10596","author":"Wang Xingmei","year":"2024","unstructured":"Xingmei Wang, Weiwen Liu, Xiaolong Chen, Qi Liu, Xu Huang, Yichao Wang, Xiangyang Li, Yasheng Wang, Zhenhua Dong, Defu Lian, et al., 2024. CELA: Cost-Efficient Language Model Alignment for CTR Prediction. arXiv preprint arXiv:2405.10596 (2024)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/3640457.3688104"},{"key":"e_1_3_2_1_52_1","volume-title":"Attentional factorization machines: Learning the weight of feature interactions via attention networks. 
arXiv preprint arXiv:1708.04617","author":"Xiao Jun","year":"2017","unstructured":"Jun Xiao, Hao Ye, Xiangnan He, Hanwang Zhang, Fei Wu, and Tat-Seng Chua. 2017. Attentional factorization machines: Learning the weight of feature interactions via attention networks. arXiv preprint arXiv:1708.04617 (2017)."},{"key":"e_1_3_2_1_53_1","volume-title":"International conference on machine learning. PMLR, 802-810","author":"Yan Ling","year":"2014","unstructured":"Ling Yan, Wu-Jun Li, Gui-Rong Xue, and Dingyi Han. 2014. Coupled group lasso for web-scale ctr prediction in display advertising. In International conference on machine learning. PMLR, 802-810."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2021.102853"},{"key":"e_1_3_2_1_55_1","volume-title":"Explainable CTR Prediction via LLM Reasoning. arXiv preprint arXiv:2412.02588","author":"Yu Xiaohan","year":"2024","unstructured":"Xiaohan Yu, Li Zhang, and Chong Chen. 2024. Explainable CTR Prediction via LLM Reasoning. arXiv preprint arXiv:2412.02588 (2024)."},{"key":"e_1_3_2_1_56_1","volume-title":"Proceedings of the 41st International Conference on Machine Learning. 59421-59434","author":"Zhang Buyun","year":"2024","unstructured":"Buyun Zhang, Liang Luo, Yuxin Chen, Jade Nie, Xi Liu, Shen Li, Yanli Zhao, Yuchen Hao, Yantao Yao, Ellie Dingqiao Wen, et al., 2024. Wukong: towards a scaling law for large-scale recommendation. In Proceedings of the 41st International Conference on Machine Learning. 
59421-59434."},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2921026"},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i12.33450"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015941"},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3219823"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531723"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591988"}],"event":{"name":"WWW '26: The ACM Web Conference 2026","location":"Dubai United Arab Emirates","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM Web Conference 2026"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3774904.3792387","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,27]],"date-time":"2026-04-27T12:51:26Z","timestamp":1777294286000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3774904.3792387"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4,12]]},"references-count":62,"alternative-id":["10.1145\/3774904.3792387","10.1145\/3774904"],"URL":"https:\/\/doi.org\/10.1145\/3774904.3792387","relation":{},"subject":[],"published":{"date-parts":[[2026,4,12]]},"assertion":[{"value":"2026-04-12","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}