{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T01:13:17Z","timestamp":1755825197246,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":62,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,3,4]],"date-time":"2024-03-04T00:00:00Z","timestamp":1709510400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,3,4]]},"DOI":"10.1145\/3616855.3635774","type":"proceedings-article","created":{"date-parts":[[2024,3,4]],"date-time":"2024-03-04T18:18:12Z","timestamp":1709576292000},"page":"655-664","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["LEAD: Liberal Feature-based Distillation for Dense Retrieval"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8456-7925","authenticated-orcid":false,"given":"Hao","family":"Sun","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8893-366X","authenticated-orcid":false,"given":"Xiao","family":"Liu","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9954-9674","authenticated-orcid":false,"given":"Yeyun","family":"Gong","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8241-4746","authenticated-orcid":false,"given":"Anlei","family":"Dong","sequence":"additional","affiliation":[{"name":"Microsoft, Mountain View, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8208-898X","authenticated-orcid":false,"given":"Jingwen","family":"Lu","sequence":"additional","affiliation":[{"name":"Microsoft, Redmond, WA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4003-0290","authenticated-orcid":false,"given":"Yan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8843-1822","authenticated-orcid":false,"given":"Linjun","family":"Yang","sequence":"additional","affiliation":[{"name":"Microsoft, Redmond, WA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2430-575X","authenticated-orcid":false,"given":"Rangan","family":"Majumder","sequence":"additional","affiliation":[{"name":"Microsoft, Redmond, WA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3387-4674","authenticated-orcid":false,"given":"Nan","family":"Duan","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2024,3,4]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"e_1_3_2_1_2_1","volume-title":"Inpars: Data augmentation for information retrieval using large language models. arXiv preprint arXiv:2202.05144","author":"Bonifacio Luiz","year":"2022","unstructured":"Luiz Bonifacio, Hugo Abonizio, Marzieh Fadaee, and Rodrigo Nogueira. 2022. Inpars: Data augmentation for information retrieval using large language models. arXiv preprint arXiv:2202.05144 (2022)."},{"key":"e_1_3_2_1_3_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems Vol. 33 (2020) 1877--1901."},{"key":"e_1_3_2_1_4_1","volume-title":"Overview of the TREC 2020 deep learning track. CoRR","volume":"2102","author":"Craswell Nick","year":"2021","unstructured":"Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the TREC 2020 deep learning track. CoRR , Vol. abs\/2102.07662 (2021). showeprint[arXiv]2102.07662 https:\/\/arxiv.org\/abs\/2102.07662"},{"key":"e_1_3_2_1_5_1","volume-title":"Overview of the TREC 2019 deep learning track. CoRR","volume":"2003","author":"Craswell Nick","year":"2020","unstructured":"Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020. Overview of the TREC 2019 deep learning track. CoRR , Vol. abs\/2003.07820 (2020). showeprint[arXiv]2003.07820 https:\/\/arxiv.org\/abs\/2003.07820"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"crossref","unstructured":"Zhuyun Dai and Jamie Callan. 2019. Deeper Text Understanding for IR with Contextual Neural Language Modeling. In SIGIR. 985--988.","DOI":"10.1145\/3331184.3331303"},{"key":"e_1_3_2_1_7_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_8_1","volume-title":"SPLADE v2: Sparse Lexical and Expansion Model for Information Retrieval. CoRR","author":"Formal Thibault","year":"2021","unstructured":"Thibault Formal, Carlos Lassance, Benjamin Piwowarski, and St\u00e9phane Clinchant. 2021. SPLADE v2: Sparse Lexical and Expansion Model for Information Retrieval. CoRR , Vol. abs\/2109.10086 (2021). showeprint[arXiv]2109.10086 https:\/\/arxiv.org\/abs\/2109.10086"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.75"},{"key":"e_1_3_2_1_10_1","volume-title":"Is your language model ready for dense representation fine-tuning. arXiv preprint arXiv:2104.08253","author":"Gao Luyu","year":"2021","unstructured":"Luyu Gao and Jamie Callan. 2021b. Is your language model ready for dense representation fine-tuning. arXiv preprint arXiv:2104.08253 (2021)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.241"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01453-z"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.103"},{"key":"e_1_3_2_1_14_1","volume-title":"Nan Duan, et al.","author":"He Xingwei","year":"2022","unstructured":"Xingwei He, Yeyun Gong, A Jin, Hang Zhang, Anlei Dong, Jian Jiao, Siu Ming Yiu, Nan Duan, et al. 2022. Curriculum Sampling for Dense Retrieval with Document Expansion. arXiv preprint arXiv:2212.09114 (2022)."},{"key":"e_1_3_2_1_15_1","volume-title":"Distilling the Knowledge in a Neural Network. CoRR","author":"Hinton Geoffrey E.","year":"2015","unstructured":"Geoffrey E. Hinton, Oriol Vinyals, and Jeffrey Dean. 2015. Distilling the Knowledge in a Neural Network. CoRR , Vol. abs\/1503.02531 (2015)."},{"key":"e_1_3_2_1_16_1","volume-title":"Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation. CoRR","author":"Hofst\u00e4tter Sebastian","year":"2020","unstructured":"Sebastian Hofst\u00e4tter, Sophia Althammer, Michael Schr\u00f6der, Mete Sertkan, and Allan Hanbury. 2020. Improving Efficient Neural Ranking Models with Cross-Architecture Knowledge Distillation. CoRR , Vol. abs\/2010.02666 (2020). showeprint[arXiv]2010.02666 https:\/\/arxiv.org\/abs\/2010.02666"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","unstructured":"Sebastian Hofst\u00e4tter Sheng-Chieh Lin Jheng-Hong Yang Jimmy Lin and Allan Hanbury. 2021. Efficiently Teaching an Effective Dense Retriever with Balanced Topic Aware Sampling. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval Fernando Diaz Chirag Shah Torsten Suel Pablo Castells Rosie Jones and Tetsuya Sakai (Eds.). 113--122. https:\/\/doi.org\/10.1145\/3404835.3462891","DOI":"10.1145\/3404835.3462891"},{"key":"e_1_3_2_1_18_1","volume-title":"Like what you like: Knowledge distill via neuron selectivity transfer. arXiv preprint arXiv:1707.01219","author":"Huang Zehao","year":"2017","unstructured":"Zehao Huang and Naiyan Wang. 2017. Like what you like: Knowledge distill via neuron selectivity transfer. arXiv preprint arXiv:1707.01219 (2017)."},{"key":"e_1_3_2_1_19_1","volume-title":"Tinybert: Distilling bert for natural language understanding. arXiv preprint arXiv:1909.10351","author":"Jiao Xiaoqi","year":"2019","unstructured":"Xiaoqi Jiao, Yichun Yin, Lifeng Shang, Xin Jiang, Xiao Chen, Linlin Li, Fang Wang, and Qun Liu. 2019. Tinybert: Distilling bert for natural language understanding. arXiv preprint arXiv:1909.10351 (2019)."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.550"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","unstructured":"Omar Khattab and Matei Zaharia. 2020. ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval Jimmy X. Huang Yi Chang Xueqi Cheng Jaap Kamps Vanessa Murdock Ji-Rong Wen and Yiqun Liu (Eds.). 39--48. https:\/\/doi.org\/10.1145\/3397271.3401075","DOI":"10.1145\/3397271.3401075"},{"key":"e_1_3_2_1_22_1","volume-title":"Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems","author":"Kim Jangho","year":"2018","unstructured":"Jangho Kim, SeongUk Park, and Nojun Kwak. 2018. Paraphrasing complex network: Network compression via factor transfer. Advances in neural information processing systems , Vol. 31 (2018)."},{"key":"e_1_3_2_1_23_1","volume-title":"VIRT: Improving Representation-based Models for Text Matching through Virtual Interaction. arXiv preprint arXiv:2112.04195","author":"Li Dan","year":"2021","unstructured":"Dan Li, Yang Yang, Hongyin Tang, Jingang Wang, Tong Xu, Wei Wu, and Enhong Chen. 2021a. VIRT: Improving Representation-based Models for Text Matching through Virtual Interaction. arXiv preprint arXiv:2112.04195 (2021)."},{"key":"e_1_3_2_1_24_1","volume-title":"VIRT: Improving Representation-based Models for Text Matching through Virtual Interaction. CoRR","author":"Li Dan","year":"2021","unstructured":"Dan Li, Yang Yang, Hongyin Tang, Jingang Wang, Tong Xu, Wei Wu, and Enhong Chen. 2021b. VIRT: Improving Representation-based Models for Text Matching through Virtual Interaction. CoRR , Vol. abs\/2112.04195 (2021). showeprint[arXiv]2112.04195 https:\/\/arxiv.org\/abs\/2112.04195"},{"key":"e_1_3_2_1_25_1","volume-title":"Distilling Dense Representations for Ranking using Tightly-Coupled Teachers. CoRR","author":"Lin Sheng-Chieh","year":"2020","unstructured":"Sheng-Chieh Lin, Jheng-Hong Yang, and Jimmy Lin. 2020a. Distilling Dense Representations for Ranking using Tightly-Coupled Teachers. CoRR , Vol. abs\/2010.11386 (2020). showeprint[arXiv]2010.11386 https:\/\/arxiv.org\/abs\/2010.11386"},{"key":"e_1_3_2_1_26_1","volume-title":"Distilling dense representations for ranking using tightly-coupled teachers. arXiv preprint arXiv:2010.11386","author":"Lin Sheng-Chieh","year":"2020","unstructured":"Sheng-Chieh Lin, Jheng-Hong Yang, and Jimmy Lin. 2020b. Distilling dense representations for ranking using tightly-coupled teachers. arXiv preprint arXiv:2010.11386 (2020)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.repl4nlp-1.17"},{"key":"e_1_3_2_1_28_1","volume-title":"PROD: Progressive Distillation for Dense Retrieval. arXiv preprint arXiv:2209.13335","author":"Lin Zhenghao","year":"2022","unstructured":"Zhenghao Lin, Yeyun Gong, Xiao Liu, Hang Zhang, Chen Lin, Anlei Dong, Jian Jiao, Jingwen Lu, Daxin Jiang, Rangan Majumder, et al. 2022. PROD: Progressive Distillation for Dense Retrieval. arXiv preprint arXiv:2209.13335 (2022)."},{"key":"e_1_3_2_1_29_1","volume-title":"Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2205.09153"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00369"},{"key":"e_1_3_2_1_32_1","volume-title":"Unsupervised corpus aware language model pre-training for dense passage retrieval. arXiv preprint arXiv:2108.05540","author":"Luyu Gao","year":"2021","unstructured":"Gao Luyu and Callan Jamie. 2021. Unsupervised corpus aware language model pre-training for dense passage retrieval. arXiv preprint arXiv:2108.05540 (2021)."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5963"},{"key":"e_1_3_2_1_34_1","volume-title":"Proceedings of the Workshop on Cognitive Computation: Integrating neural and symbolic approaches 2016 (CEUR Workshop Proceedings","volume":"9","author":"Nguyen Tri","year":"2016","unstructured":"Tri Nguyen, Mir Rosenberg, Xia Song, Jianfeng Gao, Saurabh Tiwary, Rangan Majumder, and Li Deng. 2016. MS MARCO: A Human Generated MAchine Reading COmprehension Dataset. In Proceedings of the Workshop on Cognitive Computation: Integrating neural and symbolic approaches 2016 (CEUR Workshop Proceedings, Vol. 1773), , Tarek Richard Besold, Antoine Bordes, Artur S. d'Avila Garcez, and Greg Wayne (Eds.). http:\/\/ceur-ws.org\/Vol-1773\/CoCoNIPS_2016_paper9.pdf"},{"key":"e_1_3_2_1_35_1","unstructured":"Jianmo Ni Chen Qu Jing Lu Zhuyun Dai Gustavo Hern\u00e1ndez \u00c1brego Ji Ma Vincent Y Zhao Yi Luan Keith B Hall Ming-Wei Chang et al. 2021. Large dual encoders are generalizable retrievers. arXiv preprint arXiv:2112.07899 (2021)."},{"key":"e_1_3_2_1_36_1","volume-title":"Document Expansion by Query Prediction. CoRR","author":"Nogueira Rodrigo Frassetto","year":"2019","unstructured":"Rodrigo Frassetto Nogueira, Wei Yang, Jimmy Lin, and Kyunghyun Cho. 2019. Document Expansion by Query Prediction. CoRR , Vol. abs\/1904.08375 (2019)."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01252-6_17"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i15.17610"},{"key":"e_1_3_2_1_39_1","volume-title":"International conference on machine learning. PMLR, 5142--5151","author":"Phuong Mary","year":"2019","unstructured":"Mary Phuong and Christoph Lampert. 2019. Towards understanding knowledge distillation. In International conference on machine learning. PMLR, 5142--5151."},{"key":"e_1_3_2_1_40_1","volume-title":"Daxiang Dong, Hua Wu, and Haifeng Wang.","author":"Qu Yingqi","year":"2021","unstructured":"Yingqi Qu, Yuchen Ding, Jing Liu, Kai Liu, Ruiyang Ren, Wayne Xin Zhao, Daxiang Dong, Hua Wu, and Haifeng Wang. 2021. RocketQA: An Optimized Training Approach to Dense Passage Retrieval for Open-Domain Question Answering. In NAACL-HLT. 5835--5847."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.191"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.224"},{"key":"e_1_3_2_1_43_1","volume-title":"Antoine Chassang, Carlo Gatta, and Yoshua Bengio.","author":"Romero Adriana","year":"2015","unstructured":"Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio. 2015. FitNets: Hints for Thin Deep Nets. In ICLR (Poster)."},{"key":"e_1_3_2_1_44_1","volume-title":"a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108","author":"Sanh Victor","year":"2019","unstructured":"Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.272"},{"key":"e_1_3_2_1_46_1","volume-title":"Simple entity-centric questions challenge dense retrievers. arXiv preprint arXiv:2109.08535","author":"Sciavolino Christopher","year":"2021","unstructured":"Christopher Sciavolino, Zexuan Zhong, Jinhyuk Lee, and Danqi Chen. 2021. Simple entity-centric questions challenge dense retrievers. arXiv preprint arXiv:2109.08535 (2021)."},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-019-0197-0"},{"key":"e_1_3_2_1_48_1","unstructured":"Shaden Smith Mostofa Patwary Brandon Norick Patrick LeGresley Samyam Rajbhandari Jared Casper Zhun Liu Shrimai Prabhumoye George Zerveas Vijay Korthikanti et al. 2022. Using deepspeed and megatron to train megatron-turing nlg 530b a large-scale generative language model. arXiv preprint arXiv:2201.11990 (2022)."},{"key":"e_1_3_2_1_49_1","volume-title":"Patient knowledge distillation for bert model compression. arXiv preprint arXiv:1908.09355","author":"Sun Siqi","year":"2019","unstructured":"Siqi Sun, Yu Cheng, Zhe Gan, and Jingjing Liu. 2019a. Patient knowledge distillation for bert model compression. arXiv preprint arXiv:1908.09355 (2019)."},{"key":"e_1_3_2_1_50_1","volume-title":"A Continual Pre-training Framework for Language Understanding. arXiv preprint arXiv:1907.12412","author":"Sun Yu","year":"2019","unstructured":"Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2019b. ERNIE 2.0: A Continual Pre-training Framework for Language Understanding. arXiv preprint arXiv:1907.12412 (2019)."},{"key":"e_1_3_2_1_51_1","volume-title":"H Chi, and Sagar Jain","author":"Tang Jiaxi","year":"2020","unstructured":"Jiaxi Tang, Rakesh Shivanna, Zhe Zhao, Dong Lin, Anima Singh, Ed H Chi, and Sagar Jain. 2020. Understanding and improving knowledge distillation. arXiv preprint arXiv:2002.03532 (2020)."},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58586-0_20"},{"key":"e_1_3_2_1_53_1","volume-title":"Proceedings of the 9th International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=zeFrfgyZln","author":"Xiong Lee","year":"2021","unstructured":"Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul N. Bennett, Junaid Ahmed, and Arnold Overwijk. 2021. Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval. In Proceedings of the 9th International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=zeFrfgyZln"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58595-2_40"},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/3077136.3080721"},{"key":"e_1_3_2_1_56_1","unstructured":"Sergey Zagoruyko and Nikos Komodakis. 2017. Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks via Attention Transfer. In ICLR (Poster)."},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"crossref","unstructured":"Hansi Zeng Hamed Zamani and Vishwa Vinay. 2022. Curriculum Learning for Dense Retrieval Distillation. In SIGIR. 1979--1983.","DOI":"10.1145\/3477495.3531791"},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","unstructured":"Jingtao Zhan Jiaxin Mao Yiqun Liu Jiafeng Guo Min Zhang and Shaoping Ma. 2021. Optimizing Dense Retrieval Model Training with Hard Negatives. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval Fernando Diaz Chirag Shah Torsten Suel Pablo Castells Rosie Jones and Tetsuya Sakai (Eds.). 1503--1512. https:\/\/doi.org\/10.1145\/3404835.3462880","DOI":"10.1145\/3404835.3462880"},{"key":"e_1_3_2_1_59_1","volume-title":"Proceedings of the 10th International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=MR7XubKUFB","author":"Zhang Hang","year":"2022","unstructured":"Hang Zhang, Yeyun Gong, Yelong Shen, Jiancheng Lv, Nan Duan, and Weizhu Chen. 2022a. Adversarial Retriever-Ranker for Dense Text Retrieval. In Proceedings of the 10th International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=MR7XubKUFB"},{"key":"e_1_3_2_1_60_1","volume-title":"Adversarial Retriever-Ranker for Dense Text Retrieval. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=MR7XubKUFB","author":"Zhang Hang","year":"2022","unstructured":"Hang Zhang, Yeyun Gong, Yelong Shen, Jiancheng Lv, Nan Duan, and Weizhu Chen. 2022b. Adversarial Retriever-Ranker for Dense Text Retrieval. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=MR7XubKUFB"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2210.11773"},{"key":"e_1_3_2_1_62_1","volume-title":"Daxin Jiang, Nan Duan, and Ji-Rong Wen.","author":"Zhou Kun","year":"2022","unstructured":"Kun Zhou, Xiao Liu, Yeyun Gong, Wayne Xin Zhao, Daxin Jiang, Nan Duan, and Ji-Rong Wen. 2022b. MASTER: Multi-task Pre-trained Bottlenecked Masked Autoencoders are Better Dense Retrievers. arXiv preprint arXiv:2212.07841 (2022). "}],"event":{"name":"WSDM '24: The 17th ACM International Conference on Web Search and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGIR ACM Special Interest Group on Information Retrieval"],"location":"Merida Mexico","acronym":"WSDM '24"},"container-title":["Proceedings of the 17th ACM International Conference on Web Search and Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3616855.3635774","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3616855.3635774","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:53:36Z","timestamp":1755824016000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3616855.3635774"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,4]]},"references-count":62,"alternative-id":["10.1145\/3616855.3635774","10.1145\/3616855"],"URL":"https:\/\/doi.org\/10.1145\/3616855.3635774","relation":{},"subject":[],"published":{"date-parts":[[2024,3,4]]},"assertion":[{"value":"2024-03-04","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}