{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T18:22:56Z","timestamp":1772302976534,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":63,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,13]]},"DOI":"10.1145\/3726302.3729957","type":"proceedings-article","created":{"date-parts":[[2025,7,14]],"date-time":"2025-07-14T01:18:36Z","timestamp":1752455916000},"page":"1240-1250","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":8,"title":["Parametric Retrieval Augmented Generation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8718-9402","authenticated-orcid":false,"given":"Weihang","family":"Su","sequence":"first","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0367-5916","authenticated-orcid":false,"given":"Yichen","family":"Tang","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5030-709X","authenticated-orcid":false,"given":"Qingyao","family":"Ai","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-2974-7321","authenticated-orcid":false,"given":"Junxi","family":"Yan","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5460-5279","authenticated-orcid":false,"given":"Changyue","family":"Wang","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6524-9195","authenticated-orcid":false,"given":"Hongning","family":"Wang","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5622-0235","authenticated-orcid":false,"given":"Ziyi","family":"Ye","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3530-3787","authenticated-orcid":false,"given":"Yujia","family":"Zhou","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0140-4512","authenticated-orcid":false,"given":"Yiqun","family":"Liu","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,7,13]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"With greater text comes greater necessity: Inference-time training helps long text generation. arXiv preprint arXiv:2401.11504","year":"2024","unstructured":"2024. With greater text comes greater necessity: Inference-time training helps long text generation. arXiv preprint arXiv:2401.11504 (2024)."},{"key":"e_1_3_2_1_2_1","volume-title":"Knowledge Storage and Extraction. In Forty-first International Conference on Machine Learning.","author":"Allen-Zhu Zeyuan","unstructured":"Zeyuan Allen-Zhu and Yuanzhi Li. [n.d.]. Physics of Language Models: Part 3.1, Knowledge Storage and Extraction. 
In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_1_3_1","volume-title":"The Twelfth International Conference on Learning Representations.","author":"Asai Akari","unstructured":"Akari Asai, ZeqiuWu, YizhongWang, Avirup Sil, and Hannaneh Hajishirzi. [n.d.]. Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_4_1","volume-title":"Probing-RAG: Self-Probing to Guide Language Models in Selective Document Retrieval. arXiv preprint arXiv:2410.13339","author":"Baek Ingeol","year":"2024","unstructured":"Ingeol Baek, Hwan Chang, Byeongjeong Kim, Jimin Lee, and Hwanhee Lee. 2024. Probing-RAG: Self-Probing to Guide Language Models in Selective Document Retrieval. arXiv preprint arXiv:2410.13339 (2024)."},{"key":"e_1_3_2_1_5_1","volume-title":"International conference on machine learning. PMLR, 2206-2240","author":"Borgeaud Sebastian","year":"2022","unstructured":"Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. 2022. Improving language models by retrieving from trillions of tokens. In International conference on machine learning. PMLR, 2206-2240."},{"key":"e_1_3_2_1_6_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020) 1877-1901."},{"key":"e_1_3_2_1_7_1","volume-title":"Charles Sutton, Sebastian Gehrmann, et al.","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)."},{"key":"e_1_3_2_1_8_1","volume-title":"How abilities in large language models are affected by supervised fine-tuning data composition. arXiv preprint arXiv:2310.05492","author":"Dong Guanting","year":"2023","unstructured":"Guanting Dong, Hongyi Yuan, Keming Lu, Chengpeng Li, Mingfeng Xue, Dayiheng Liu, Wei Wang, Zheng Yuan, Chang Zhou, and Jingren Zhou. 2023. How abilities in large language models are affected by supervised fine-tuning data composition. arXiv preprint arXiv:2310.05492 (2023)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3696410.3714608"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3614923"},{"key":"e_1_3_2_1_11_1","volume-title":"From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130","author":"Edge Darren","year":"2024","unstructured":"Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, and Jonathan Larson. 2024. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130 (2024)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657743"},{"key":"e_1_3_2_1_13_1","volume-title":"International conference on machine learning. PMLR, 3929-3938","author":"Guu Kelvin","year":"2020","unstructured":"Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. 
In International conference on machine learning. PMLR, 3929-3938."},{"key":"e_1_3_2_1_14_1","volume-title":"Saku Sugawara, and Akiko Aizawa.","author":"Ho Xanh","year":"2020","unstructured":"Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop QA dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020)."},{"key":"e_1_3_2_1_15_1","volume-title":"LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations.","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang,Weizhu Chen, et al. 2022. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_16_1","volume-title":"GRAG: Graph Retrieval-Augmented Generation. arXiv preprint arXiv:2405.16506","author":"Hu Yuntong","year":"2024","unstructured":"Yuntong Hu, Zhihan Lei, Zheng Zhang, Bo Pan, Chen Ling, and Liang Zhao. 2024. GRAG: Graph Retrieval-Augmented Generation. arXiv preprint arXiv:2405.16506 (2024)."},{"key":"e_1_3_2_1_17_1","volume-title":"Leveraging passage retrieval with generative models for open domain question answering. arXiv preprint arXiv:2007.01282","author":"Izacard Gautier","year":"2020","unstructured":"Gautier Izacard and Edouard Grave. 2020. Leveraging passage retrieval with generative models for open domain question answering. arXiv preprint arXiv:2007.01282 (2020)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.389"},{"key":"e_1_3_2_1_19_1","volume-title":"Retrieval as attention: End-to-end learning of retrieval and reading within a single transformer. arXiv preprint arXiv:2212.02027","author":"Jiang Zhengbao","year":"2022","unstructured":"Zhengbao Jiang, Luyu Gao, Jun Araki, Haibo Ding, Zhiruo Wang, Jamie Callan, and Graham Neubig. 2022. Retrieval as attention: End-to-end learning of retrieval and reading within a single transformer. arXiv preprint arXiv:2212.02027 (2022)."},{"key":"e_1_3_2_1_20_1","volume-title":"Active retrieval augmented generation. arXiv preprint arXiv:2305.06983","author":"Jiang Zhengbao","year":"2023","unstructured":"Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023. Active retrieval augmented generation. arXiv preprint arXiv:2305.06983 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Dense passage retrieval for opendomain question answering. arXiv preprint arXiv:2004.04906","author":"Karpukhin Vladimir","year":"2020","unstructured":"Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for opendomain question answering. arXiv preprint arXiv:2004.04906 (2020)."},{"key":"e_1_3_2_1_22_1","volume-title":"Bridging the preference gap between retrievers and llms. arXiv preprint arXiv:2401.06954","author":"Ke Zixuan","year":"2024","unstructured":"Zixuan Ke, Weize Kong, Cheng Li, Mingyang Zhang, Qiaozhu Mei, and Michael Bendersky. 2024. Bridging the preference gap between retrievers and llms. arXiv preprint arXiv:2401.06954 (2024)."},{"key":"e_1_3_2_1_23_1","volume-title":"Same task, more tokens: the impact of input length on the reasoning performance of large language models. arXiv preprint arXiv:2402.14848","author":"Levy Mosh","year":"2024","unstructured":"Mosh Levy, Alon Jacoby, and Yoav Goldberg. 2024. 
Same task, more tokens: the impact of input length on the reasoning performance of large language models. arXiv preprint arXiv:2402.14848 (2024)."},{"key":"e_1_3_2_1_24_1","first-page":"9459","article-title":"Retrieval-augmented generation for knowledge-intensive nlp tasks","volume":"33","author":"Lewis Patrick","year":"2020","unstructured":"Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\u00e4schel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459-9474.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_25_1","volume-title":"Towards better web search performance: pre-training, fine-tuning and learning to rank. arXiv preprint arXiv:2303.04710","author":"Li Haitao","year":"2023","unstructured":"Haitao Li, Jia Chen, Weihang Su, Qingyao Ai, and Yiqun Liu. 2023. Towards better web search performance: pre-training, fine-tuning and learning to rank. arXiv preprint arXiv:2303.04710 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"Cong Zhang, and Yong Liu.","author":"Liu Huanshuo","year":"2024","unstructured":"Huanshuo Liu, Hao Zhang, Zhijiang Guo, Kuicai Dong, Xiangyang Li, Yi Quan Lee, Cong Zhang, and Yong Liu. 2024. CtrlA: Adaptive Retrieval-Augmented Generation via Probe-Guided Control. arXiv preprint arXiv:2405.18727 (2024)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00638"},{"key":"e_1_3_2_1_28_1","volume-title":"CaseEncoder: A Knowledge-enhanced Pre-trained Model for Legal Case Encoding. arXiv preprint arXiv:2305.05393","author":"Ma Yixiao","year":"2023","unstructured":"Yixiao Ma, Yueyue Wu, Weihang Su, Qingyao Ai, and Yiqun Liu. 2023. CaseEncoder: A Knowledge-enhanced Pre-trained Model for Legal Case Encoding. arXiv preprint arXiv:2305.05393 (2023)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.546"},{"key":"e_1_3_2_1_30_1","unstructured":"Meta. 2024. Llama-3.2-1B-Instruct. https:\/\/huggingface.co\/meta-llama\/Llama-3.2-1B-Instruct Accessed: 2024-09."},{"key":"e_1_3_2_1_31_1","unstructured":"Meta. 2024. Meta-Llama-3-8B-Instruct. https:\/\/huggingface.co\/meta-llama\/Meta- Llama-3-8B-Instruct Accessed: 2024-04."},{"key":"e_1_3_2_1_32_1","unstructured":"Neel Nanda Senthooran Rajamanoharan J\u00e1nos Kram\u00e1r and Rohin Shah. 2023. Fact Finding: Attempting to Reverse-Engineer Factual Recall on the Neuron Level. https:\/\/www.lesswrong.com\/posts\/iGuwZTHWb6DFY3sKB\/fact-findingattempting-to-reverse-engineer-factual-recall Accessed: 2025-01-24."},{"key":"e_1_3_2_1_33_1","volume-title":"Graph retrieval-augmented generation: A survey. arXiv preprint arXiv:2408.08921","author":"Peng Boci","year":"2024","unstructured":"Boci Peng, Yun Zhu, Yongchao Liu, Xiaohe Bo, Haizhou Shi, Chuntao Hong, Yan Zhang, and Siliang Tang. 2024. Graph retrieval-augmented generation: A survey. arXiv preprint arXiv:2408.08921 (2024)."},{"key":"e_1_3_2_1_34_1","volume-title":"In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083","author":"Ram Ori","year":"2023","unstructured":"Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. 2023. In-context retrieval-augmented language models. arXiv preprint arXiv:2302.00083 (2023)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"crossref","unstructured":"Stephen Robertson Hugo Zaragoza et al. 
2009. The probabilistic relevance framework: BM25 and beyond. Foundations and Trends\u00ae in Information Retrieval 3 4 (2009) 333-389.","DOI":"10.1561\/1500000019"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657733"},{"key":"e_1_3_2_1_37_1","volume-title":"Fran\u00e7ois Yvon, Matthias Gall\u00e9, et al.","author":"Scao Teven Le","year":"2022","unstructured":"Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagn\u00e9, Alexandra Sasha Luccioni, Fran\u00e7ois Yvon, Matthias Gall\u00e9, et al. 2022. Bloom: A 176b-parameter open-access multilingual language model. arXiv preprint arXiv:2211.05100 (2022)."},{"key":"e_1_3_2_1_38_1","volume-title":"Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652","author":"Shi Weijia","year":"2023","unstructured":"Weijia Shi, Sewon Min, Michihiro Yasunaga, Minjoon Seo, Rich James, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. 2023. Replug: Retrieval-augmented black-box language models. arXiv preprint arXiv:2301.12652 (2023)."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/331403.331405"},{"key":"e_1_3_2_1_40_1","unstructured":"Tim Soulo. 2023. 96.55% of Content Gets No Traffic From Google. Here's How to Be in the Other 3.45% [New Research for 2023]. https:\/\/ahrefs.com\/blog\/searchtraffic-study\/ Accessed: 2025-01-24."},{"key":"e_1_3_2_1_41_1","volume-title":"Wikiformer: Pre-training with Structured Information of Wikipedia for Ad-hoc Retrieval. arXiv preprint arXiv:2312.10661","author":"Su Weihang","year":"2023","unstructured":"Weihang Su, Qingyao Ai, Xiangsheng Li, Jia Chen, Yiqun Liu, Xiaolong Wu, and Shengluan Hou. 2023. Wikiformer: Pre-training with Structured Information of Wikipedia for Ad-hoc Retrieval. arXiv preprint arXiv:2312.10661 (2023)."},{"key":"e_1_3_2_1_42_1","volume-title":"Caseformer: Pre-training for Legal Case Retrieval. arXiv preprint arXiv:2311.00333","author":"Su Weihang","year":"2023","unstructured":"Weihang Su, Qingyao Ai, Yueyue Wu, Yixiao Ma, Haitao Li, and Yiqun Liu. 2023. Caseformer: Pre-training for Legal Case Retrieval. arXiv preprint arXiv:2311.00333 (2023)."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.625"},{"key":"e_1_3_2_1_44_1","volume-title":"Thuir2 at ntcir-16 session search (ss) task. arXiv preprint arXiv:2307.00250","author":"Su Weihang","year":"2023","unstructured":"Weihang Su, Xiangsheng Li, Yiqun Liu, Min Zhang, and Shaoping Ma. 2023. Thuir2 at ntcir-16 session search (ss) task. arXiv preprint arXiv:2307.00250 (2023)."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3673791.3698403"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","unstructured":"Weihang Su Yichen Tang Qingyao Ai ZhijingWu and Yiqun Liu. 2024. DRAGIN: Dynamic Retrieval Augmented Generation based on the Real-time Information Needs of Large Language Models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers) Lun-Wei Ku Andre Martins and Vivek Srikumar (Eds.). Association for Computational Linguistics Bangkok Thailand 12991-13013. https:\/\/doi.org\/10.18653\/v1\/2024. acl-long.702","DOI":"10.18653\/v1\/2024"},{"key":"e_1_3_2_1_47_1","volume-title":"Unsupervised real-time hallucination detection based on the internal states of large language models. 
arXiv preprint arXiv:2403.06448","author":"Su Weihang","year":"2024","unstructured":"Weihang Su, Changyue Wang, Qingyao Ai, Yiran Hu, Zhijing Wu, Yujia Zhou, and Yiqun Liu. 2024. Unsupervised real-time hallucination detection based on the internal states of large language models. arXiv preprint arXiv:2403.06448 (2024)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3726302.3730295"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1059"},{"key":"e_1_3_2_1_50_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_51_1","volume-title":"Interleaving retrieval with chain-of-thought reasoning for knowledgeintensive multi-step questions. arXiv preprint arXiv:2212.10509","author":"Trivedi Harsh","year":"2022","unstructured":"Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. Interleaving retrieval with chain-of-thought reasoning for knowledgeintensive multi-step questions. arXiv preprint arXiv:2212.10509 (2022)."},{"key":"e_1_3_2_1_52_1","volume-title":"RbFT: Robust Fine-tuning for Retrieval-Augmented Generation against Retrieval Defects. arXiv preprint arXiv:2501.18365","author":"Tu Yiteng","year":"2025","unstructured":"Yiteng Tu, Weihang Su, Yujia Zhou, Yiqun Liu, and Qingyao Ai. 2025. RbFT: Robust Fine-tuning for Retrieval-Augmented Generation against Retrieval Defects. arXiv preprint arXiv:2501.18365 (2025)."},{"key":"e_1_3_2_1_53_1","volume-title":"LeKUBE: A Legal Knowledge Update BEnchmark. arXiv preprint arXiv:2407.14192","author":"Wang Changyue","year":"2024","unstructured":"Changyue Wang, Weihang Su, Hu Yiran, Qingyao Ai, Yueyue Wu, Cheng Luo, Yiqun Liu, Min Zhang, and Shaoping Ma. 2024. LeKUBE: A Legal Knowledge Update BEnchmark. arXiv preprint arXiv:2407.14192 (2024)."},{"key":"e_1_3_2_1_54_1","volume-title":"Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002","author":"Wang Yile","year":"2023","unstructured":"Yile Wang, Peng Li, Maosong Sun, and Yang Liu. 2023. Self-knowledge guided retrieval augmentation for large language models. arXiv preprint arXiv:2310.05002 (2023)."},{"key":"e_1_3_2_1_55_1","volume-title":"Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313","author":"Wang Zihao","year":"2024","unstructured":"Zihao Wang, Anji Liu, Haowei Lin, Jiaqi Li, Xiaojian Ma, and Yitao Liang. 2024. Rat: Retrieval augmented thoughts elicit context-aware reasoning in long-horizon generation. arXiv preprint arXiv:2403.05313 (2024)."},{"key":"e_1_3_2_1_56_1","volume-title":"Continual learning for large language models: A survey. arXiv preprint arXiv:2402.01364","author":"Wu Tongtong","year":"2024","unstructured":"Tongtong Wu, Linhao Luo, Yuan-Fang Li, Shirui Pan, Thuy-Trang Vu, and Gholamreza Haffari. 2024. Continual learning for large language models: A survey. 
arXiv preprint arXiv:2402.01364 (2024)."},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.875"},{"key":"e_1_3_2_1_58_1","unstructured":"An Yang Baosong Yang Beichen Zhang Binyuan Hui Bo Zheng Bowen Yu Chengyuan Li Dayiheng Liu Fei Huang Haoran Wei et al. 2024. Qwen2. 5 Technical Report. arXiv preprint arXiv:2412.15115 (2024)."},{"key":"e_1_3_2_1_59_1","volume-title":"HotpotQA: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600","author":"Yang Zhilin","year":"2018","unstructured":"Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, WilliamWCohen, Ruslan Salakhutdinov, and Christopher D Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. arXiv preprint arXiv:1809.09600 (2018)."},{"key":"e_1_3_2_1_60_1","volume-title":"Seakr: Self-aware knowledge retrieval for adaptive retrieval augmented generation. arXiv preprint arXiv:2406.19215","author":"Yao Zijun","year":"2024","unstructured":"Zijun Yao, Weijian Qi, Liangming Pan, Shulin Cao, Linmei Hu, Weichuan Liu, Lei Hou, and Juanzi Li. 2024. Seakr: Self-aware knowledge retrieval for adaptive retrieval augmented generation. arXiv preprint arXiv:2406.19215 (2024)."},{"key":"e_1_3_2_1_61_1","volume-title":"Rankrag: Unifying context ranking with retrieval-augmented generation in llms. arXiv preprint arXiv:2407.02485","author":"Yu Yue","year":"2024","unstructured":"Yue Yu,Wei Ping, Zihan Liu, BoxinWang, Jiaxuan You, Chao Zhang, Mohammad Shoeybi, and Bryan Catanzaro. 2024. Rankrag: Unifying context ranking with retrieval-augmented generation in llms. arXiv preprint arXiv:2407.02485 (2024)."},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.191"},{"key":"e_1_3_2_1_63_1","volume-title":"Statistical language models for information retrieval. Synthesis lectures on human language technologies 1, 1","author":"Zhai ChengXiang","year":"2008","unstructured":"ChengXiang Zhai. 2008. Statistical language models for information retrieval. Synthesis lectures on human language technologies 1, 1 (2008), 1-141."}],"event":{"name":"SIGIR '25: The 48th International ACM SIGIR Conference on Research and Development in Information Retrieval","location":"Padua Italy","acronym":"SIGIR '25","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 48th International ACM SIGIR Conference on Research and Development in Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3726302.3729957","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T18:30:27Z","timestamp":1755887427000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3726302.3729957"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,13]]},"references-count":63,"alternative-id":["10.1145\/3726302.3729957","10.1145\/3726302"],"URL":"https:\/\/doi.org\/10.1145\/3726302.3729957","relation":{},"subject":[],"published":{"date-parts":[[2025,7,13]]},"assertion":[{"value":"2025-07-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
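
The record above is a standard Crossref "work" message. As a minimal sketch of how such a record can be re-fetched and inspected in Python: the DOI and the field names (message, title, page, reference, author) come from the record itself, while the public Crossref REST API endpoint https://api.crossref.org/works/{DOI} and the third-party requests package are assumptions, not anything stated in the record.

# Hedged sketch: fetch the Crossref metadata for the paper shown above.
# Assumes the public Crossref REST API and the `requests` package are available.
import requests

DOI = "10.1145/3726302.3729957"  # "Parametric Retrieval Augmented Generation" (from the record)

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # same shape as the "message" object above

print(work["title"][0])                # Parametric Retrieval Augmented Generation
print(work["DOI"], work["page"])       # 10.1145/3726302.3729957 1240-1250
print(len(work.get("reference", [])))  # 63 bibliographic references
for author in work.get("author", []):
    print(author.get("given"), author.get("family"))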