{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T10:23:04Z","timestamp":1771064584503,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":69,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T00:00:00Z","timestamp":1720569600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Quan Cheng Laboratory","award":["QCLZD202301"],"award-info":[{"award-number":["QCLZD202301"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,7,10]]},"DOI":"10.1145\/3626772.3657689","type":"proceedings-article","created":{"date-parts":[[2024,7,11]],"date-time":"2024-07-11T12:40:05Z","timestamp":1720701605000},"page":"48-58","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":7,"title":["Unsupervised Large Language Model Alignment for Information Retrieval via Contrastive Feedback"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6858-5303","authenticated-orcid":false,"given":"Qian","family":"Dong","sequence":"first","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6857-261X","authenticated-orcid":false,"given":"Yiding","family":"Liu","sequence":"additional","affiliation":[{"name":"Baidu Inc., Beijing, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5030-709X","authenticated-orcid":false,"given":"Qingyao","family":"Ai","sequence":"additional","affiliation":[{"name":"Quan Cheng Laboratory &amp; DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2473-3746","authenticated-orcid":false,"given":"Zhijing","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-8766-8610","authenticated-orcid":false,"given":"Haitao","family":"Li","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0140-4512","authenticated-orcid":false,"given":"Yiqun","family":"Liu","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9212-1947","authenticated-orcid":false,"given":"Shuaiqiang","family":"Wang","sequence":"additional","affiliation":[{"name":"Baidu Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0684-6205","authenticated-orcid":false,"given":"Dawei","family":"Yin","sequence":"additional","affiliation":[{"name":"Baidu Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8762-8268","authenticated-orcid":false,"given":"Shaoping","family":"Ma","sequence":"additional","affiliation":[{"name":"DCST, Tsinghua University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2024,7,11]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Yuntao Bai Saurav Kadavath Sandipan Kundu Amanda Askell Jackson Kernion Andy Jones Anna Chen Anna Goldie Azalia Mirhoseini Cameron McKinnon et al. 2022. Constitutional ai: Harmlessness from ai feedback. arXiv preprint arXiv:2212.08073 (2022)."},{"key":"e_1_3_2_1_2_1","volume-title":"Better rewards yield better summaries: Learning to summarise without references. 
arXiv preprint arXiv:1909.01214","author":"B\u00f6hm Florian","year":"2019","unstructured":"Florian B\u00f6hm, Yang Gao, Christian M Meyer, Ori Shapira, Ido Dagan, and Iryna Gurevych. 2019. Better rewards yield better summaries: Learning to summarise without references. arXiv preprint arXiv:1909.01214 (2019)."},{"key":"e_1_3_2_1_3_1","volume-title":"InPars: Data Augmentation for Information Retrieval using Large Language Models. arXiv preprint arXiv:2202.05144","author":"Bonifacio Luiz","year":"2022","unstructured":"Luiz Bonifacio, Hugo Abonizio, Marzieh Fadaee, and Rodrigo Nogueira. 2022. InPars: Data Augmentation for Information Retrieval using Large Language Models. arXiv preprint arXiv:2202.05144 (2022)."},{"key":"e_1_3_2_1_4_1","unstructured":"Mrinmoi Borah Pankaj Dadure Partha Pakray et al. 2022. Comparative analysis of T5 model for abstractive text summarization on different datasets. (2022)."},{"key":"e_1_3_2_1_5_1","volume-title":"Unsupervised opinion summarization as copycat-review generation. arXiv preprint arXiv:1911.02247","author":"Bravzinskas Arthur","year":"2019","unstructured":"Arthur Bravzinskas, Mirella Lapata, and Ivan Titov. 2019. Unsupervised opinion summarization as copycat-review generation. arXiv preprint arXiv:1911.02247 (2019)."},{"key":"e_1_3_2_1_6_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems Vol. 33 (2020) 1877--1901."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v29i1.9490"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531943"},{"key":"e_1_3_2_1_9_1","volume-title":"International conference on machine learning. PMLR, 1597--1607","author":"Chen Ting","year":"2020","unstructured":"Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A simple framework for contrastive learning of visual representations. In International conference on machine learning. PMLR, 1597--1607."},{"key":"e_1_3_2_1_10_1","volume-title":"Layout-aware Webpage Quality Assessment. arXiv preprint arXiv:2301.12152","author":"Cheng Anfeng","year":"2023","unstructured":"Anfeng Cheng, Yiding Liu, Weibin Li, Qian Dong, Shuaiqiang Wang, Zhengjie Huang, Shikun Feng, Zhicong Cheng, and Dawei Yin. 2023. Layout-aware Webpage Quality Assessment. arXiv preprint arXiv:2301.12152 (2023)."},{"key":"e_1_3_2_1_11_1","unstructured":"Hyung Won Chung Le Hou Shayne Longpre Barret Zoph Yi Tay William Fedus Eric Li Xuezhi Wang Mostafa Dehghani Siddhartha Brahma et al. 2022. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)."},{"key":"e_1_3_2_1_12_1","volume-title":"Trung Bui, Seokhwan Kim, Walter Chang, and Nazli Goharian.","author":"Cohan Arman","year":"2018","unstructured":"Arman Cohan, Franck Dernoncourt, Doo Soon Kim, Trung Bui, Seokhwan Kim, Walter Chang, and Nazli Goharian. 2018. A discourse-aware attention model for abstractive summarization of long documents. arXiv preprint arXiv:1804.05685 (2018)."},{"key":"e_1_3_2_1_13_1","volume-title":"Chataug: Leveraging chatgpt for text data augmentation. arXiv preprint arXiv:2302.13007","author":"Dai Haixing","year":"2023","unstructured":"Haixing Dai, Zhengliang Liu, Wenxiong Liao, Xiaoke Huang, Zihao Wu, Lin Zhao, Wei Liu, Ninghao Liu, Sheng Li, Dajiang Zhu, et al. 2023a. Chataug: Leveraging chatgpt for text data augmentation. 
{"key":"e_1_3_2_1_14_1","volume-title":"Safe rlhf: Safe reinforcement learning from human feedback. arXiv preprint arXiv:2310.12773","author":"Dai Josef","year":"2023","unstructured":"Josef Dai, Xuehai Pan, Ruiyang Sun, Jiaming Ji, Xinbo Xu, Mickel Liu, Yizhou Wang, and Yaodong Yang. 2023b. Safe rlhf: Safe reinforcement learning from human feedback. arXiv preprint arXiv:2310.12773 (2023)."},{"key":"e_1_3_2_1_15_1","volume-title":"Promptagator: Few-shot dense retrieval from 8 examples. arXiv preprint arXiv:2209.11755","author":"Dai Zhuyun","year":"2022","unstructured":"Zhuyun Dai, Vincent Y Zhao, Ji Ma, Yi Luan, Jianmo Ni, Jing Lu, Anton Bakalov, Kelvin Guu, Keith B Hall, and Ming-Wei Chang. 2022. Promptagator: Few-shot dense retrieval from 8 examples. arXiv preprint arXiv:2209.11755 (2022)."},{"key":"e_1_3_2_1_16_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_17_1","volume-title":"Incorporating Implicit Interaction in Pre-trained Language Models for Passage Retrieval. arXiv preprint arXiv:2306.02371","author":"Dong Qian","year":"2023","unstructured":"Qian Dong, Yiding Liu, Qingyao Ai, Haitao Li, Shuaiqiang Wang, Yiqun Liu, Dawei Yin, and Shaoping Ma. 2023. I^3 Retriever: Incorporating Implicit Interaction in Pre-trained Language Models for Passage Retrieval. arXiv preprint arXiv:2306.02371 (2023)."},{"key":"e_1_3_2_1_18_1","volume-title":"Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking. arXiv preprint arXiv:2204.11673","author":"Dong Qian","year":"2022","unstructured":"Qian Dong, Yiding Liu, Suqi Cheng, Shuaiqiang Wang, Zhicong Cheng, Shuzi Niu, and Dawei Yin. 2022a. Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking. arXiv preprint arXiv:2204.11673 (2022)."},{"key":"e_1_3_2_1_19_1","volume-title":"Latent Graph Recurrent Network for Document Ranking. In International Conference on Database Systems for Advanced Applications. Springer, 88--103","author":"Dong Qian","year":"2021","unstructured":"Qian Dong and Shuzi Niu. 2021a. Latent Graph Recurrent Network for Document Ranking. In International Conference on Database Systems for Advanced Applications. Springer, 88--103."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462931"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1007\/s41019-022-00179-3"},{"key":"e_1_3_2_1_22_1","unstructured":"Mehrdad Farahani. 2020. Summarization using Bert2Bert model on WikiSummary dataset. https:\/\/github.com\/m3hrdadfi\/wiki-summary."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-28238-6_31"},{"key":"e_1_3_2_1_24_1","volume-title":"Lcsts: A large scale chinese short text summarization dataset. arXiv preprint arXiv:1506.05865","author":"Hu Baotian","year":"2015","unstructured":"Baotian Hu, Qingcai Chen, and Fangze Zhu. 2015. Lcsts: A large scale chinese short text summarization dataset. arXiv preprint arXiv:1506.05865 (2015)."},{"key":"e_1_3_2_1_25_1","volume-title":"Citation: A key to building responsible and accountable large language models. arXiv preprint arXiv:2307.02185","author":"Huang Jie","year":"2023","unstructured":"Jie Huang and Kevin Chen-Chuan Chang. 2023. Citation: A key to building responsible and accountable large language models. arXiv preprint arXiv:2307.02185 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118","author":"Izacard Gautier","year":"2021","unstructured":"Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2021. Unsupervised dense information retrieval with contrastive learning. arXiv preprint arXiv:2112.09118 (2021)."},{"key":"e_1_3_2_1_27_1","volume-title":"Mill: Mutual verification with large language models for zero-shot query expansion. arXiv preprint arXiv:2310.19056","author":"Jia Pengyue","year":"2023","unstructured":"Pengyue Jia, Yiding Liu, Xiangyu Zhao, Xiaopeng Li, Changying Hao, Shuaiqiang Wang, and Dawei Yin. 2023. Mill: Mutual verification with large language models for zero-shot query expansion. arXiv preprint arXiv:2310.19056 (2023)."},{"key":"e_1_3_2_1_28_1","volume-title":"An empirical survey on long document summarization: Datasets, models, and metrics. ACM computing surveys","author":"Koh Huan Yee","year":"2022","unstructured":"Huan Yee Koh, Jiaxin Ju, Ming Liu, and Shirui Pan. 2022. An empirical survey on long document summarization: Datasets, models, and metrics. ACM computing surveys, Vol. 55, 8 (2022), 1--35."},{"key":"e_1_3_2_1_29_1","volume-title":"Automated Summarization of Stack Overflow Posts. arXiv preprint arXiv:2305.16680","author":"Kou Bonan","year":"2023","unstructured":"Bonan Kou, Muhao Chen, and Tianyi Zhang. 2023. Automated Summarization of Stack Overflow Posts. arXiv preprint arXiv:2305.16680 (2023)."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00276"},{"key":"e_1_3_2_1_31_1","volume-title":"Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv preprint arXiv:2309.00267","author":"Lee Harrison","year":"2023","unstructured":"Harrison Lee, Samrat Phatale, Hassan Mansoor, Kellie Lu, Thomas Mesnard, Colton Bishop, Victor Carbune, and Abhinav Rastogi. 2023. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv preprint arXiv:2309.00267 (2023)."},{"key":"e_1_3_2_1_32_1","volume-title":"Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461","author":"Lewis Mike","year":"2019","unstructured":"Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. 2019. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461 (2019)."},{"key":"e_1_3_2_1_33_1","volume-title":"SAILER: Structure-aware Pre-trained Language Model for Legal Case Retrieval. arXiv preprint arXiv:2304.11370","author":"Li Haitao","year":"2023","unstructured":"Haitao Li, Qingyao Ai, Jia Chen, Qian Dong, Yueyue Wu, Yiqun Liu, Chong Chen, and Qi Tian. 2023. SAILER: Structure-aware Pre-trained Language Model for Legal Case Retrieval. arXiv preprint arXiv:2304.11370 (2023)."},{"key":"e_1_3_2_1_34_1","volume-title":"Evaluating verifiability in generative search engines. arXiv preprint arXiv:2304.09848","author":"Liu Nelson F","year":"2023","unstructured":"Nelson F Liu, Tianyi Zhang, and Percy Liang. 2023. Evaluating verifiability in generative search engines. arXiv preprint arXiv:2304.09848 (2023)."},
{"key":"e_1_3_2_1_35_1","volume-title":"Fine-tune BERT for extractive summarization. arXiv preprint arXiv:1903.10318","author":"Liu Yang","year":"2019","unstructured":"Yang Liu. 2019. Fine-tune BERT for extractive summarization. arXiv preprint arXiv:1903.10318 (2019)."},{"key":"e_1_3_2_1_36_1","volume-title":"Pre-trained Language Model for Web-scale Retrieval in Baidu Search. arXiv preprint arXiv:2106.03373","author":"Liu Yiding","year":"2021","unstructured":"Yiding Liu, Weixue Lu, Suqi Cheng, Daiting Shi, Shuaiqiang Wang, Zhicong Cheng, and Dawei Yin. 2021. Pre-trained Language Model for Web-scale Retrieval in Baidu Search. arXiv preprint arXiv:2106.03373 (2021)."},{"key":"e_1_3_2_1_37_1","volume-title":"Pre-training with Large Language Model-based Document Expansion for Dense Passage Retrieval. arXiv preprint arXiv:2308.08285","author":"Ma Guangyuan","year":"2023","unstructured":"Guangyuan Ma, Xing Wu, Peng Wang, Zijia Lin, and Songlin Hu. 2023. Pre-training with Large Language Model-based Document Expansion for Dense Passage Retrieval. arXiv preprint arXiv:2308.08285 (2023)."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"crossref","unstructured":"Ramesh Nallapati Bowen Zhou Caglar Gulcehre Bing Xiang et al. 2016. Abstractive text summarization using sequence-to-sequence rnns and beyond. arXiv preprint arXiv:1602.06023 (2016).","DOI":"10.18653\/v1\/K16-1028"},{"key":"e_1_3_2_1_39_1","volume-title":"Ranking sentences for extractive summarization with reinforcement learning. arXiv preprint arXiv:1802.08636","author":"Narayan Shashi","year":"2018","unstructured":"Shashi Narayan, Shay B Cohen, and Mirella Lapata. 2018. Ranking sentences for extractive summarization with reinforcement learning. arXiv preprint arXiv:1802.08636 (2018)."},{"key":"e_1_3_2_1_40_1","volume-title":"From doc2query to docTTTTTquery. Online preprint","author":"Nogueira Rodrigo","year":"2019","unstructured":"Rodrigo Nogueira and Jimmy Lin. 2019a. From doc2query to docTTTTTquery. Online preprint, Vol. 6 (2019), 2."},{"key":"e_1_3_2_1_41_1","volume-title":"Document expansion by query prediction. arXiv preprint arXiv:1904.08375","author":"Nogueira Rodrigo","year":"2019","unstructured":"Rodrigo Nogueira, Wei Yang, Jimmy Lin, and Kyunghyun Cho. 2019b. Document expansion by query prediction. arXiv preprint arXiv:1904.08375 (2019)."},{"key":"e_1_3_2_1_42_1","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang Long","year":"2022","unstructured":"Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, Vol. 35 (2022), 27730--27744.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_43_1","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans Ilya Sutskever et al. 2018. Improving language understanding by generative pre-training. (2018)."},{"key":"e_1_3_2_1_44_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, Vol. 1, 8 (2019), 9."},{"key":"e_1_3_2_1_45_1","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., Vol. 21, 140 (2020), 1--67.","journal-title":"J. Mach. Learn. Res."},{"key":"e_1_3_2_1_46_1","unstructured":"Elizabeth Reid. 2023. Supercharging search with Generative AI. https:\/\/blog.google\/products\/search\/generative-ai-search\/"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1561\/1500000019"},{"key":"e_1_3_2_1_48_1","volume-title":"A neural attention model for abstractive sentence summarization. arXiv preprint arXiv:1509.00685","author":"Rush Alexander M","year":"2015","unstructured":"Alexander M Rush, Sumit Chopra, and Jason Weston. 2015. A neural attention model for abstractive sentence summarization. arXiv preprint arXiv:1509.00685 (2015)."},{"key":"e_1_3_2_1_49_1","volume-title":"Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347","author":"Schulman John","year":"2017","unstructured":"John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017)."},{"key":"e_1_3_2_1_50_1","volume-title":"Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368","author":"See Abigail","year":"2017","unstructured":"Abigail See, Peter J Liu, and Christopher D Manning. 2017. Get to the point: Summarization with pointer-generator networks. arXiv preprint arXiv:1704.04368 (2017)."},{"key":"e_1_3_2_1_51_1","first-page":"3008","article-title":"Learning to summarize with human feedback","volume":"33","author":"Stiennon Nisan","year":"2020","unstructured":"Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. 2020. Learning to summarize with human feedback. Advances in Neural Information Processing Systems, Vol. 33 (2020), 3008--3021.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_52_1","volume-title":"Beir: A heterogenous benchmark for zero-shot evaluation of information retrieval models. arXiv preprint arXiv:2104.08663","author":"Thakur Nandan","year":"2021","unstructured":"Nandan Thakur, Nils Reimers, Andreas R\u00fcckl\u00e9, Abhishek Srivastava, and Iryna Gurevych. 2021. Beir: A heterogenous benchmark for zero-shot evaluation of information retrieval models. arXiv preprint arXiv:2104.08663 (2021)."},{"key":"e_1_3_2_1_53_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N Gomez \u0141ukasz Kaiser and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems. 5998--6008."},{"key":"e_1_3_2_1_54_1","volume-title":"Gpl: Generative pseudo labeling for unsupervised domain adaptation of dense retrieval. arXiv preprint arXiv:2112.07577","author":"Wang Kexin","year":"2021","unstructured":"Kexin Wang, Nandan Thakur, Nils Reimers, and Iryna Gurevych. 2021. Gpl: Generative pseudo labeling for unsupervised domain adaptation of dense retrieval. arXiv preprint arXiv:2112.07577 (2021)."},
{"key":"e_1_3_2_1_55_1","volume-title":"Query2doc: Query Expansion with Large Language Models. arXiv preprint arXiv:2303.07678","author":"Wang Liang","year":"2023","unstructured":"Liang Wang, Nan Yang, and Furu Wei. 2023. Query2doc: Query Expansion with Large Language Models. arXiv preprint arXiv:2303.07678 (2023)."},{"key":"e_1_3_2_1_56_1","volume-title":"LLM-powered Data Augmentation for Enhanced Crosslingual Performance. arXiv preprint arXiv:2305.14288","author":"Whitehouse Chenxi","year":"2023","unstructured":"Chenxi Whitehouse, Monojit Choudhury, and Alham Fikri Aji. 2023. LLM-powered Data Augmentation for Enhanced Crosslingual Performance. arXiv preprint arXiv:2305.14288 (2023)."},{"key":"e_1_3_2_1_57_1","unstructured":"Likang Wu Zhi Zheng Zhaopeng Qiu Hao Wang Hongchao Gu Tingjia Shen Chuan Qin Chen Zhu Hengshu Zhu Qi Liu et al. 2023. A Survey on Large Language Models for Recommendation. arXiv preprint arXiv:2305.19860 (2023)."},{"key":"e_1_3_2_1_58_1","unstructured":"Xiaohui Xie Qian Dong Bingning Wang Feiyang Lv Ting Yao Weinan Gan Zhijing Wu Xiangsheng Li Haitao Li Yiqun Liu et al. 2023. T2Ranking: A large-scale Chinese Benchmark for Passage Ranking. arXiv preprint arXiv:2304.03679 (2023)."},{"key":"e_1_3_2_1_59_1","volume-title":"Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808","author":"Xiong Lee","year":"2020","unstructured":"Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate nearest neighbor negative contrastive learning for dense text retrieval. arXiv preprint arXiv:2007.00808 (2020)."},{"key":"e_1_3_2_1_60_1","volume-title":"Fine-Tuning BART for Abstractive Reviews Summarization. In Computational Intelligence: Select Proceedings of InCITe","author":"Yadav Hemant","year":"2023","unstructured":"Hemant Yadav, Nehal Patel, and Dishank Jani. 2023. Fine-Tuning BART for Abstractive Reviews Summarization. In Computational Intelligence: Select Proceedings of InCITe 2022. Springer, 375--385."},{"key":"e_1_3_2_1_61_1","volume-title":"RLCD: Reinforcement Learning from Contrast Distillation for Language Model Alignment. arXiv preprint arXiv:2307.12950","author":"Yang Kevin","year":"2023","unstructured":"Kevin Yang, Dan Klein, Asli Celikyilmaz, Nanyun Peng, and Yuandong Tian. 2023. RLCD: Reinforcement Learning from Contrast Distillation for Language Model Alignment. arXiv preprint arXiv:2307.12950 (2023)."},{"key":"e_1_3_2_1_62_1","volume-title":"China Conference on Information Retrieval. Springer, 28--39","author":"Yang Shenghao","year":"2022","unstructured":"Shenghao Yang, Yiqun Liu, Xiaohui Xie, Min Zhang, and Shaoping Ma. 2022. Enhance Performance of Ad-hoc Search via Prompt Learning. In China Conference on Information Retrieval. Springer, 28--39."},{"key":"e_1_3_2_1_63_1","volume-title":"BELLE: Bloom-Enhanced Large Language model Engine. https:\/\/github.com\/LianjiaTech\/BELLE.","author":"Ji Yunjie","year":"2023","unstructured":"Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Baochang Ma, and Xiangang Li. 2023. BELLE: Bloom-Enhanced Large Language model Engine. https:\/\/github.com\/LianjiaTech\/BELLE."},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1311"},{"key":"e_1_3_2_1_65_1","volume-title":"Neural document summarization by jointly learning to score and select sentences. arXiv preprint arXiv:1807.02305","author":"Zhou Qingyu","year":"2018","unstructured":"Qingyu Zhou, Nan Yang, Furu Wei, Shaohan Huang, Ming Zhou, and Tiejun Zhao. 2018. Neural document summarization by jointly learning to score and select sentences. arXiv preprint arXiv:1807.02305 (2018)."},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"publisher","DOI":"10.1145\/3624918.3625328"},{"key":"e_1_3_2_1_67_1","volume-title":"Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107","author":"Zhu Yutao","year":"2023","unstructured":"Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023)."},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/1060745.1060754"},{"key":"e_1_3_2_1_69_1","volume-title":"Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593","author":"Ziegler Daniel M","year":"2019","unstructured":"Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. 2019. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593 (2019)."}],"event":{"name":"SIGIR 2024: The 47th International ACM SIGIR Conference on Research and Development in Information Retrieval","location":"Washington DC USA","acronym":"SIGIR 2024","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626772.3657689","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3626772.3657689","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:44:14Z","timestamp":1755841454000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626772.3657689"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,10]]},"references-count":69,"alternative-id":["10.1145\/3626772.3657689","10.1145\/3626772"],"URL":"https:\/\/doi.org\/10.1145\/3626772.3657689","relation":{},"subject":[],"published":{"date-parts":[[2024,7,10]]},"assertion":[{"value":"2024-07-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}