{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T00:59:37Z","timestamp":1774400377196,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":50,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,7,18]],"date-time":"2023-07-18T00:00:00Z","timestamp":1689638400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,7,19]]},"DOI":"10.1145\/3539618.3592047","type":"proceedings-article","created":{"date-parts":[[2023,7,19]],"date-time":"2023-07-19T00:22:23Z","timestamp":1689726143000},"page":"2308-2313","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":61,"title":["RankT5: Fine-Tuning T5 for Text Ranking with Ranking Losses"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8134-1509","authenticated-orcid":false,"given":"Honglei","family":"Zhuang","sequence":"first","affiliation":[{"name":"Google Research, Mountain View, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6739-134X","authenticated-orcid":false,"given":"Zhen","family":"Qin","sequence":"additional","affiliation":[{"name":"Google Research, New York, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5169-495X","authenticated-orcid":false,"given":"Rolf","family":"Jagerman","sequence":"additional","affiliation":[{"name":"Google Research, Amsterdam, Netherlands"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3110-7404","authenticated-orcid":false,"given":"Kai","family":"Hui","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-2102-8209","authenticated-orcid":false,"given":"Ji","family":"Ma","sequence":"additional","affiliation":[{"name":"Google Research, London, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1076-7662","authenticated-orcid":false,"given":"Jing","family":"Lu","sequence":"additional","affiliation":[{"name":"Google Research, New York, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6863-8073","authenticated-orcid":false,"given":"Jianmo","family":"Ni","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-1388-1423","authenticated-orcid":false,"given":"Xuanhui","family":"Wang","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2941-6240","authenticated-orcid":false,"given":"Michael","family":"Bendersky","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,7,18]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"MS MARCO: A Human Generated MAchine Reading COmprehension Dataset. arXiv preprint arXiv:1611.09268","author":"Bajaj Payal","year":"2016","unstructured":"Payal Bajaj, Daniel Campos, Nick Craswell, Li Deng, Jianfeng Gao, Xiaodong Liu, Rangan Majumder, Andrew McNamara, Bhaskar Mitra, Tri Nguyen, et al. 2016. MS MARCO: A Human Generated MAchine Reading COmprehension Dataset. arXiv preprint arXiv:1611.09268 (2016)."},{"key":"e_1_3_2_1_2_1","first-page":"1877","article-title":"Language Models are Few-Shot Learners","volume":"33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language Models are Few-Shot Learners. In Advances in Neural Information Processing Systems, Vol. 33. 1877--1901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3341981.3344221"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/1102351.1102363"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/1273496.1273513"},{"key":"e_1_3_2_1_6_1","volume-title":"Charles Sutton, Sebastian Gehrmann, et al.","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. PaLM: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)."},{"key":"e_1_3_2_1_7_1","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). 4171--4186."},{"key":"e_1_3_2_1_8_1","volume-title":"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing. 1722--1727","author":"dos Santos Cicero","year":"2020","unstructured":"Cicero dos Santos, Xiaofei Ma, Ramesh Nallapati, Zhiheng Huang, and Bing Xiang. 2020. Beyond [CLS] through Ranking by Generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing. 1722--1727."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.342"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-72240-1_26"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.552"},{"key":"e_1_3_2_1_12_1","volume-title":"Learning-to-Rank with BERT in TF-Ranking. arXiv preprint arXiv:2004.08476","author":"Han Shuguang","year":"2020","unstructured":"Shuguang Han, Xuanhui Wang, Mike Bendersky, and Marc Najork. 2020. Learning-to-Rank with BERT in TF-Ranking. arXiv preprint arXiv:2004.08476 (2020)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.295"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531849"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539065"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/582415.582418"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3463048"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.550"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3397271.3401075"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00276"},{"key":"e_1_3_2_1_21_1","volume-title":"Language modeling for information retrieval","author":"Lafferty John","unstructured":"John Lafferty and Chengxiang Zhai. 2003. Probabilistic relevance models based on document and query generation. In Language modeling for information retrieval. Springer, 1--10."},{"key":"e_1_3_2_1_22_1","volume-title":"PolyLoss: A Polynomial Expansion Perspective of Classification Loss Functions. In International Conference on Learning Representations.","author":"Leng Zhaoqi","year":"2022","unstructured":"Zhaoqi Leng, Mingxing Tan, Chenxi Liu, Ekin Dogus Cubuk, Jay Shi, Shuyang Cheng, and Dragomir Anguelov. 2022. PolyLoss: A Polynomial Expansion Perspective of Classification Loss Functions. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_23_1","volume-title":"Pretrained transformers for text ranking: BERT and beyond","author":"Lin Jimmy","unstructured":"Jimmy Lin, Rodrigo Nogueira, and Andrew Yates. 2021. Pretrained transformers for text ranking: BERT and beyond. Morgan & Claypool Publishers."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449863"},{"key":"e_1_3_2_1_25_1","volume-title":"Learning to Rank for Information Retrieval","author":"Liu Tie-Yan","unstructured":"Tie-Yan Liu. 2009. Learning to Rank for Information Retrieval. Now Publishers Inc."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.492"},{"key":"e_1_3_2_1_27_1","volume-title":"Hao Tian, Hua Wu, Shuaiqiang Wang, Dawei Yin, et al.","author":"Lu Yuxiang","year":"2022","unstructured":"Yuxiang Lu, Yiding Liu, Jiaxiang Liu, Yunsheng Shi, Zhengjie Huang, Shikun Feng Yu Sun, Hao Tian, Hua Wu, Shuaiqiang Wang, Dawei Yin, et al. 2022. ERNIE-Search: Bridging Cross-Encoder with Dual-Encoder via Self On-the-fly Distillation for Dense Passage Retrieval. arXiv preprint arXiv:2205.09153 (2022)."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3397271.3401093"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557231"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.146"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.669"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.63"},{"key":"e_1_3_2_1_33_1","volume-title":"Multi-stage document ranking with BERT. arXiv preprint arXiv:1910.14424","author":"Nogueira Rodrigo","year":"2019","unstructured":"Rodrigo Nogueira, Wei Yang, Kyunghyun Cho, and Jimmy Lin. 2019. Multi-stage document ranking with BERT. arXiv preprint arXiv:1910.14424 (2019)."},{"key":"e_1_3_2_1_34_1","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Gray et al. 2022. Training language models to follow instructions with human feedback. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_35_1","volume-title":"The expando-mono-duo design pattern for text ranking with pretrained sequence-to-sequence models. arXiv preprint arXiv:2101.05667","author":"Pradeep Ronak","year":"2021","unstructured":"Ronak Pradeep, Rodrigo Nogueira, and Jimmy Lin. 2021. The expando-mono-duo design pattern for text ranking with pretrained sequence-to-sequence models. arXiv preprint arXiv:2101.05667 (2021)."},{"key":"e_1_3_2_1_36_1","volume-title":"International Conference on Learning Representations.","author":"Qin Zhen","year":"2021","unstructured":"Zhen Qin, Le Yan, Honglei Zhuang, Yi Tay, Rama Kumar Pasumarthi, Xuanhui Wang, Michael Bendersky, and Marc Najork. 2021. Are Neural Rankers Still Outperformed by Gradient Boosted Decision Trees?. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_37_1","first-page":"1","article-title":"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer","volume":"21","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. Journal of Machine Learning Research, Vol. 21 (2020), 1--67.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.224"},{"key":"e_1_3_2_1_39_1","volume-title":"The probabilistic relevance framework: BM25 and beyond","author":"Robertson Stephen","unstructured":"Stephen Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: BM25 and beyond. Now Publishers Inc."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.249"},{"key":"e_1_3_2_1_41_1","unstructured":"Yi Tay Vinh Q Tran Mostafa Dehghani Jianmo Ni Dara Bahri Harsh Mehta Zhen Qin Kai Hui Zhe Zhao Jai Gupta et al. 2022. Transformer memory as a differentiable search index. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_42_1","volume-title":"BEIR: A Heterogeneous Benchmark for Zero-shot Evaluation of Information Retrieval Models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2).","author":"Thakur Nandan","year":"2021","unstructured":"Nandan Thakur, Nils Reimers, Andreas R\u00fcckl\u00e9, Abhishek Srivastava, and Iryna Gurevych. 2021. BEIR: A Heterogeneous Benchmark for Zero-shot Evaluation of Information Retrieval Models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2)."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.6028\/NIST.SP.500-246.qa-overview"},{"key":"e_1_3_2_1_44_1","volume-title":"SimLM: Pre-training with representation bottleneck for dense passage retrieval. arXiv preprint arXiv:2207.02578","author":"Wang Liang","year":"2022","unstructured":"Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. 2022. SimLM: Pre-training with representation bottleneck for dense passage retrieval. arXiv preprint arXiv:2207.02578 (2022)."},{"key":"e_1_3_2_1_45_1","volume-title":"RetroMAE v2: Duplex Masked Auto-Encoder For Pre-Training Retrieval-Oriented Language Models. arXiv preprint arXiv:2211.08769","author":"Xiao Shitao","year":"2022","unstructured":"Shitao Xiao and Zheng Liu. 2022. RetroMAE v2: Duplex Masked Auto-Encoder For Pre-Training Retrieval-Oriented Language Models. arXiv preprint arXiv:2211.08769 (2022)."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.35"},{"key":"e_1_3_2_1_47_1","volume-title":"Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval. In International Conference on Learning Representations.","author":"Xiong Lee","year":"2020","unstructured":"Lee Xiong, Chenyan Xiong, Ye Li, Kwok-Fung Tang, Jialin Liu, Paul N Bennett, Junaid Ahmed, and Arnold Overwijk. 2020. Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_48_1","volume-title":"HLATR: enhance multi-stage text retrieval with hybrid list aware transformer reranking. arXiv preprint arXiv:2205.10569","author":"Zhang Yanzhao","year":"2022","unstructured":"Yanzhao Zhang, Dingkun Long, Guangwei Xu, and Pengjun Xie. 2022. HLATR: enhance multi-stage text retrieval with hybrid list aware transformer reranking. arXiv preprint arXiv:2205.10569 (2022)."},{"key":"e_1_3_2_1_49_1","volume-title":"Deep Query Likelihood Model for Information Retrieval. In European Conference On Information Retrieval. Springer, 463--470","author":"Zhuang Shengyao","year":"2021","unstructured":"Shengyao Zhuang, Hang Li, and Guido Zuccon. 2021. Deep Query Likelihood Model for Information Retrieval. In European Conference On Information Retrieval. Springer, 463--470."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462922"}],"event":{"name":"SIGIR '23: The 46th International ACM SIGIR Conference on Research and Development in Information Retrieval","location":"Taipei Taiwan","acronym":"SIGIR '23","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539618.3592047","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3539618.3592047","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:37:49Z","timestamp":1750178269000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539618.3592047"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,7,18]]},"references-count":50,"alternative-id":["10.1145\/3539618.3592047","10.1145\/3539618"],"URL":"https:\/\/doi.org\/10.1145\/3539618.3592047","relation":{},"subject":[],"published":{"date-parts":[[2023,7,18]]},"assertion":[{"value":"2023-07-18","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}