{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T13:06:56Z","timestamp":1770815216548,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T00:00:00Z","timestamp":1745280000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"DSO National Laboratories under the AI Singapore Programme","award":["AISG2-GC-2023-009"],"award-info":[{"award-number":["AISG2-GC-2023-009"]}]},{"name":"National Research Foundation Singapore"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,4,22]]},"DOI":"10.1145\/3696410.3714956","type":"proceedings-article","created":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T22:52:18Z","timestamp":1745362338000},"page":"391-401","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["AURO: Reinforcement Learning for Adaptive User Retention Optimization in Recommender Systems"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9340-0366","authenticated-orcid":false,"given":"Zhenghai","family":"Xue","sequence":"first","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6451-9299","authenticated-orcid":false,"given":"Qingpeng","family":"Cai","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7899-2017","authenticated-orcid":false,"given":"Bin","family":"Yang","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0697-8985","authenticated-orcid":false,"given":"Lantao","family":"Hu","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9266-0780","authenticated-orcid":false,"given":"Peng","family":"Jiang","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3636-3618","authenticated-orcid":false,"given":"Kun","family":"Gai","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7064-7438","authenticated-orcid":false,"given":"Bo","family":"An","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2025,4,22]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Far","author":"Afsar Mohammad Mehdi","year":"2023","unstructured":"Mohammad Mehdi Afsar, Trafford Crump, and Behrouz H. Far. 2023. Reinforcement Learning based Recommender Systems: A Survey. ACM Comput. Surv., Vol. 55, 7 (2023), 145:1--145:38."},{"key":"e_1_3_2_1_2_1","unstructured":"Xueying Bai Jian Guan and Hongning Wang. 2019. A Model-Based Reinforcement Learning with Adversarial Training for Online Recommendation. In NeurIPS."},{"key":"e_1_3_2_1_3_1","volume-title":"Zheng Xiong, Luisa M. Zintgraf, Chelsea Finn, and Shimon Whiteson.","author":"Beck Jacob","year":"2023","unstructured":"Jacob Beck, Risto Vuorio, Evan Zheran Liu, Zheng Xiong, Luisa M. Zintgraf, Chelsea Finn, and Shimon Whiteson. 2023. A Survey of Meta-Reinforcement Learning. CoRR, Vol. abs\/2301.08028 (2023)."},{"key":"e_1_3_2_1_4_1","unstructured":"William Brown and Arpit Agarwal. 2022. Diversified Recommendations for Agents with Adaptive Preferences. In NeurIPS."},{"key":"e_1_3_2_1_5_1","unstructured":"Yuri Burda Harrison Edwards Amos J. Storkey and Oleg Klimov. 2019. 
Exploration by random network distillation. In ICLR."},{"key":"e_1_3_2_1_6_1","unstructured":"Qingpeng Cai Shuchang Liu Xueliang Wang Tianyou Zuo Wentao Xie Bin Yang Dong Zheng Peng Jiang and Kun Gai. 2023a. Reinforcing User Retention in a Billion Scale Short Video Recommender System. In WWW."},{"key":"e_1_3_2_1_7_1","unstructured":"Qingpeng Cai Zhenghai Xue Chi Zhang Wanqi Xue Shuchang Liu Ruohan Zhan Xueliang Wang Tianyou Zuo Wentao Xie Dong Zheng Peng Jiang and Kun Gai. 2023b. Two-Stage Constrained Actor-Critic for Short Video Recommendation. In WWW."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"crossref","unstructured":"Haokun Chen Xinyi Dai Han Cai Weinan Zhang Xuejian Wang Ruiming Tang Yuzhou Zhang and Yong Yu. 2019. Large-Scale Interactive Recommendation with Tree-Structured Policy Gradient. In AAAI.","DOI":"10.1609\/aaai.v33i01.33013312"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"crossref","unstructured":"Shi-Yong Chen Yang Yu Qing Da Jun Tan Hai-Kuan Huang and Hai-Hong Tang. 2018. Stabilizing Reinforcement Learning in Dynamic Environment with Application to Online Recommendation. In KDD.","DOI":"10.1145\/3219819.3220122"},{"key":"e_1_3_2_1_10_1","unstructured":"Wang Chi Cheung David Simchi-Levi and Ruihao Zhu. 2020. Reinforcement Learning for Non-Stationary Markov Decision Processes: The Blessing of (More) Optimism. In ICML."},{"key":"e_1_3_2_1_11_1","unstructured":"Kamil Ciosek Quan Vuong Robert Loftin and Katja Hofmann. 2019. Better Exploration with Optimistic Actor Critic. In NeurIPS."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1198\/tech.2006.s353"},{"key":"e_1_3_2_1_13_1","unstructured":"Finale Doshi-Velez and George Dimitri Konidaris. 2016. Hidden Parameter Markov Decision Processes: A Semiparametric Regression Approach for Discovering Latent Task Parametrizations. In IJCAI."},{"key":"e_1_3_2_1_14_1","unstructured":"Fan Feng Biwei Huang Kun Zhang and Sara Magliacane. 2022. 
Factored Adaptation for Non-Stationary Reinforcement Learning. In NeurIPS."},{"key":"e_1_3_2_1_15_1","unstructured":"Scott Fujimoto and Shixiang Shane Gu. 2021. A Minimalist Approach to Offline Reinforcement Learning. In NeurIPS."},{"key":"e_1_3_2_1_16_1","unstructured":"Scott Fujimoto David Meger and Doina Precup. 2019. Off-Policy Deep Reinforcement Learning without Exploration. In ICML."},{"key":"e_1_3_2_1_17_1","unstructured":"Scott Fujimoto Herke van Hoof and David Meger. 2018. Addressing Function Approximation Error in Actor-Critic Methods. In ICML."},{"key":"e_1_3_2_1_18_1","unstructured":"Tuomas Haarnoja Aurick Zhou Pieter Abbeel and Sergey Levine. 2018. Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor. In ICML."},{"key":"e_1_3_2_1_19_1","volume-title":"RecSim: A Configurable Simulation Platform for Recommender Systems. CoRR","author":"Ie Eugene","year":"2019","unstructured":"Eugene Ie, Chih-Wei Hsu, Martin Mladenov, Vihan Jain, Sanmit Narvekar, Jing Wang, Rui Wu, and Craig Boutilier. 2019. RecSim: A Configurable Simulation Platform for Recommender Systems. CoRR, Vol. abs\/1909.04847 (2019)."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"crossref","unstructured":"Luo Ji Qi Qin Bingqing Han and Hongxia Yang. 2021. Reinforcement Learning to Optimize Lifetime Value in Cold-Start Recommendation. In CIKM.","DOI":"10.1145\/3459637.3482292"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/1352793.1352837"},{"key":"e_1_3_2_1_22_1","unstructured":"Kimin Lee Younggyo Seo Seunghyun Lee Honglak Lee and Jinwoo Shin. 2020. Context-aware Dynamics Model for Generalization in Model-Based Reinforcement Learning. 
In ICML."},{"key":"e_1_3_2_1_23_1","first-page":"3461","article-title":"MetaDrive: Composing Diverse Driving Scenarios for Generalizable Reinforcement Learning","volume":"45","author":"Li Quanyi","year":"2023","unstructured":"Quanyi Li, Zhenghao Peng, Lan Feng, Qihang Zhang, Zhenghai Xue, and Bolei Zhou. 2023. MetaDrive: Composing Diverse Driving Scenarios for Generalizable Reinforcement Learning. IEEE Trans. Pattern Anal. Mach. Intell., Vol. 45, 3 (2023), 3461--3475.","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"e_1_3_2_1_24_1","volume-title":"Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971","author":"Lillicrap Timothy P","year":"2015","unstructured":"Timothy P Lillicrap, Jonathan J Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. 2015. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971 (2015)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2925019"},{"key":"e_1_3_2_1_26_1","unstructured":"Shuchang Liu Qingpeng Cai Zhankui He Bowen Sun Julian J. McAuley Dong Zheng Peng Jiang and Kun Gai. 2023a. Generative Flow Network for Listwise Recommendation. In KDD."},{"key":"e_1_3_2_1_27_1","unstructured":"Shuchang Liu Qingpeng Cai Bowen Sun Yuhao Wang Ji Jiang Dong Zheng Peng Jiang Kun Gai Xiangyu Zhao and Yongfeng Zhang. 2023b. Exploration and Regularization of the Latent Action Space in Recommendation. In WWW."},{"key":"e_1_3_2_1_28_1","unstructured":"Xu-Hui Liu Zhenghai Xue Jing-Cheng Pang Shengyi Jiang Feng Xu and Yang Yu. 2021. Regret Minimization Experience Replay in Off-Policy Reinforcement Learning. In NeurIPS."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Ziru Liu Shuchang Liu Zijian Zhang Qingpeng Cai Xiangyu Zhao Kesen Zhao Lantao Hu Peng Jiang and Kun Gai. 2024. Sequential Recommendation for Optimizing Both Immediate Feedback and Long-term Retention. 
In SIGIR.","DOI":"10.1145\/3626772.3657829"},{"key":"e_1_3_2_1_30_1","unstructured":"Fan-Ming Luo Shengyi Jiang Yang Yu Zongzhang Zhang and Yi-Feng Zhang. 2022. Adapt to Environment Sudden Changes by Learning a Context Sensitive Policy. In AAAI."},{"key":"e_1_3_2_1_31_1","unstructured":"Ling Pan Qingpeng Cai and Longbo Huang. 2020. Softmax Deep Double Deterministic Policy Gradients. In NeurIPS."},{"key":"e_1_3_2_1_32_1","unstructured":"Kate Rakelly Aurick Zhou Chelsea Finn Sergey Levine and Deirdre Quillen. 2019. Efficient Off-Policy Meta-Reinforcement Learning via Probabilistic Context Variables. In ICML."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"crossref","unstructured":"Andrew I Schein Alexandrin Popescul Lyle H Ungar and David M Pennock. 2002. Methods and metrics for cold-start recommendations. In SIGIR.","DOI":"10.1145\/564376.564421"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.5555\/1046920.1088715"},{"key":"e_1_3_2_1_35_1","unstructured":"Jing-Cheng Shi Yang Yu Qing Da Shi-Yong Chen and Anxiang Zeng. 2019. Virtual-Taobao: Virtualizing Real-World Online Retail Environment for Reinforcement Learning. In AAAI."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"crossref","unstructured":"Harald Steck. 2018. Calibrated recommendations. In RecSys.","DOI":"10.1145\/3240323.3240372"},{"key":"e_1_3_2_1_37_1","volume-title":"Introduction: The challenge of reinforcement learning. Reinforcement Learning","author":"Sutton Richard S","year":"1992","unstructured":"Richard S Sutton. 1992. Introduction: The challenge of reinforcement learning. Reinforcement Learning (1992), 1--3."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"e_1_3_2_1_39_1","unstructured":"Adith Swaminathan and Thorsten Joachims. 2015. The Self-Normalized Estimator for Counterfactual Learning. In NIPS."},{"key":"e_1_3_2_1_40_1","volume-title":"RL4RS: A Real-World Benchmark for Reinforcement Learning based Recommender System. 
CoRR","author":"Wang Kai","year":"2021","unstructured":"Kai Wang, Zhene Zou, Qilin Deng, Yue Shang, Minghao Zhao, Runze Wu, Xudong Shen, Tangjie Lyu, and Changjie Fan. 2021. RL4RS: A Real-World Benchmark for Reinforcement Learning based Recommender System. CoRR, Vol. abs\/2110.11073 (2021)."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"crossref","unstructured":"Xiaobei Wang Shuchang Liu Xueliang Wang Qingpeng Cai Lantao Hu Han Li Peng Jiang Kun Gai and Guangming Xie. 2024. Future Impact Decomposition in Request-level Recommendations. In KDD.","DOI":"10.1145\/3637528.3671506"},{"key":"e_1_3_2_1_42_1","volume-title":"Chi, and Minmin Chen","author":"Wang Yuyan","year":"2022","unstructured":"Yuyan Wang, Mohit Sharma, Can Xu, Sriraj Badam, Qian Sun, Lee Richardson, Lisa Chung, Ed H. Chi, and Minmin Chen. 2022. Surrogate for Long-Term User Experience in Recommender Systems. In KDD."},{"key":"e_1_3_2_1_43_1","unstructured":"Ruiyang Xu Jalaj Bhandari Dmytro Korenkevych Fan Liu Yuchen He Alex Nikulkov and Zheqing Zhu. 2023. Optimizing Long-term Value for Auction-Based Recommender Systems via On-Policy Reinforcement Learning. In RecSys."},{"key":"e_1_3_2_1_44_1","volume-title":"PrefRec: Preference-based Recommender Systems for Reinforcing Long-term User Engagement. CoRR","author":"Xue Wanqi","year":"2022","unstructured":"Wanqi Xue, Qingpeng Cai, Zhenghai Xue, Shuo Sun, Shuchang Liu, Dong Zheng, Peng Jiang, and Bo An. 2022. PrefRec: Preference-based Recommender Systems for Reinforcing Long-term User Engagement. CoRR, Vol. abs\/2212.02779 (2022)."},{"key":"e_1_3_2_1_45_1","unstructured":"Wanqi Xue Qingpeng Cai Ruohan Zhan Dong Zheng Peng Jiang Kun Gai and Bo An. 2023b. ResAct: Reinforcing Long-term Engagement in Sequential Recommendation with Residual Actor. In ICLR."},{"key":"e_1_3_2_1_46_1","volume-title":"State Regularized Policy Optimization on Data with Dynamics Shift. 
CoRR","author":"Xue Zhenghai","year":"2023","unstructured":"Zhenghai Xue, Qingpeng Cai, Shuchang Liu, Dong Zheng, Peng Jiang, Kun Gai, and Bo An. 2023a. State Regularized Policy Optimization on Data with Dynamics Shift. CoRR, Vol. abs\/2306.03552 (2023)."},{"key":"e_1_3_2_1_47_1","volume-title":"Roberto Calandra, Yarin Gal, and Sergey Levine.","author":"Zhang Amy","year":"2021","unstructured":"Amy Zhang, Rowan Thomas McAllister, Roberto Calandra, Yarin Gal, and Sergey Levine. 2021. Learning Invariant Representations for Reinforcement Learning without Reconstruction. In ICLR."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"crossref","unstructured":"Qihua Zhang Junning Liu Yuzhuo Dai Yiyan Qi Yifan Yuan Kunlun Zheng Fan Huang and Xianfeng Tan. 2022. Multi-Task Fusion via Reinforcement Learning for Long-Term User Satisfaction in Recommender Systems. In KDD.","DOI":"10.1145\/3534678.3539040"},{"key":"e_1_3_2_1_49_1","unstructured":"Kesen Zhao Shuchang Liu Qingpeng Cai Xiangyu Zhao Ziru Liu Dong Zheng Peng Jiang and Kun Gai. 2023. KuaiSim: A comprehensive simulator for recommender systems. In NeurIPS."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"crossref","unstructured":"Xing Zhao Ziwei Zhu Majid Alfifi and James Caverlee. 2020. Addressing the Target Customer Distortion Problem in Recommender Systems. In WWW.","DOI":"10.1145\/3366423.3380065"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"crossref","unstructured":"Xing Zhao Ziwei Zhu and James Caverlee. 2021. Rabbit Holes and Taste Distortion: Distribution-Aware Recommendation with Evolving Interests. In WWW.","DOI":"10.1145\/3442381.3450099"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"crossref","unstructured":"Guorui Zhou Na Mou Ying Fan Qi Pi Weijie Bian Chang Zhou Xiaoqiang Zhu and Kun Gai. 2019. Deep interest evolution network for click-through rate prediction. 
In AAAI.","DOI":"10.1609\/aaai.v33i01.33015941"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"crossref","unstructured":"Guorui Zhou Xiaoqiang Zhu Chenru Song Ying Fan Han Zhu Xiao Ma Yanghui Yan Junqi Jin Han Li and Kun Gai. 2018. Deep interest network for click-through rate prediction. In KDD.","DOI":"10.1145\/3219819.3219823"},{"key":"e_1_3_2_1_54_1","unstructured":"Luisa M. Zintgraf Kyriacos Shiarlis Maximilian Igl Sebastian Schulze Yarin Gal Katja Hofmann and Shimon Whiteson. 2020. VariBAD: A Very Good Method for Bayes-Adaptive Deep RL via Meta-Learning. In ICLR."},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"crossref","unstructured":"Lixin Zou Long Xia Zhuoye Ding Jiaxing Song Weidong Liu and Dawei Yin. 2019. Reinforcement Learning to Optimize Long-term User Engagement in Recommender Systems. In KDD.","DOI":"10.1145\/3292500.3330668"}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM on Web Conference 
2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714956","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3696410.3714956","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:54Z","timestamp":1750295934000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714956"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,22]]},"references-count":55,"alternative-id":["10.1145\/3696410.3714956","10.1145\/3696410"],"URL":"https:\/\/doi.org\/10.1145\/3696410.3714956","relation":{},"subject":[],"published":{"date-parts":[[2025,4,22]]},"assertion":[{"value":"2025-04-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}