{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T16:41:07Z","timestamp":1773247267833,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":54,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T00:00:00Z","timestamp":1746662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100006374","name":"National Science Foundation","doi-asserted-by":"publisher","award":["2310966"],"award-info":[{"award-number":["2310966"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006374","name":"Air Force Office of Scientific Research","doi-asserted-by":"publisher","award":["23RT0630"],"award-info":[{"award-number":["23RT0630"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006374","name":"National Institutes of Health","doi-asserted-by":"publisher","award":["2R01HL127661"],"award-info":[{"award-number":["2R01HL127661"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,5,8]]},"DOI":"10.1145\/3701716.3717574","type":"proceedings-article","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T16:12:56Z","timestamp":1748016776000},"page":"2494-2502","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":7,"title":["APEER : &lt;u&gt;A&lt;\/u&gt;utomatic &lt;u&gt;P&lt;\/u&gt;rompt &lt;u&gt;E&lt;\/u&gt;ngineering &lt;u&gt;E&lt;\/u&gt;nhances Large Language Model 
&lt;u&gt;R&lt;\/u&gt;eranking"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-3407-1658","authenticated-orcid":false,"given":"Can","family":"Jin","sequence":"first","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2025-2195","authenticated-orcid":false,"given":"Hongwu","family":"Peng","sequence":"additional","affiliation":[{"name":"University of Connecticut, Storrs, Connecticut, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4978-725X","authenticated-orcid":false,"given":"Shiyu","family":"Zhao","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0311-1331","authenticated-orcid":false,"given":"Zhenting","family":"Wang","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3500-1068","authenticated-orcid":false,"given":"Wujiang","family":"Xu","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3166-0848","authenticated-orcid":false,"given":"Ligong","family":"Han","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0558-5579","authenticated-orcid":false,"given":"Jiahui","family":"Zhao","sequence":"additional","affiliation":[{"name":"University of Connecticut, Storrs, Connecticut, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7606-1366","authenticated-orcid":false,"given":"Kai","family":"Zhong","sequence":"additional","affiliation":[{"name":"Independent, Palo Alto, California, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0137-4843","authenticated-orcid":false,"given":"Sanguthevar","family":"Rajasekaran","sequence":"additional","affiliation":[{"name":"University of Connecticut, Storrs, Connecticut, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7142-7640","authenticated-orcid":false,"given":"Dimitris N.","family":"Metaxas","sequence":"additional","affiliation":[{"name":"Rutgers University, Piscataway, New Jersey, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,5,23]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"2024. Qwen2 Technical Report. (2024)."},{"key":"e_1_3_2_2_2_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_2_3_1","unstructured":"AI@Meta. 2024. Llama 3 Model Card. (2024). https:\/\/github.com\/meta-llama\/ llama3\/blob\/main\/MODEL_CARD.md"},{"key":"e_1_3_2_2_4_1","unstructured":"Payal Bajaj Daniel Campos Nick Craswell Li Deng Jianfeng Gao Xiaodong Liu Rangan Majumder Andrew McNamara Bhaskar Mitra Tri Nguyen et al. 2016. Ms marco: A human generated machine reading comprehension dataset. arXiv preprint arXiv:1611.09268 (2016)."},{"key":"e_1_3_2_2_5_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020) 1877--1901."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.61"},{"key":"e_1_3_2_2_7_1","volume-title":"Overview of the TREC 2019 deep learning track. arXiv preprint arXiv:2003","author":"Craswell Nick","year":"2020","unstructured":"Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M Voorhees. 2020. Overview of the TREC 2019 deep learning track. 
arXiv preprint arXiv:2003.07820 (2020)."},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1109"},{"key":"e_1_3_2_2_9_1","volume-title":"Recommender systems in the era of large language models (llms). arXiv preprint arXiv:2307.02046","author":"Fan Wenqi","year":"2023","unstructured":"Wenqi Fan, Zihuai Zhao, Jiatong Li, Yunqing Liu, Xiaowei Mei, Yiqi Wang, Jiliang Tang, and Qing Li. 2023. Recommender systems in the era of large language models (llms). arXiv preprint arXiv:2307.02046 (2023)."},{"key":"e_1_3_2_2_10_1","volume-title":"Pre-training Methods in Information Retrieval. Foundations and Trends\u00ae in Information Retrieval 16, 3","author":"Fan Yixing","year":"2022","unstructured":"Yixing Fan, Xiaohui Xie, Yinqiong Cai, Jia Chen, Xinyu Ma, Xiangsheng Li, Ruqing Zhang, and Jiafeng Guo. 2022. Pre-training Methods in Information Retrieval. Foundations and Trends\u00ae in Information Retrieval 16, 3 (2022), 178--317."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.99"},{"key":"e_1_3_2_2_12_1","volume-title":"Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers. In The Twelfth International Conference on Learning Representations.","author":"Guo Qingyan","year":"2023","unstructured":"Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. 2023. Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers. 
In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-56060-6_24"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00324"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.550"},{"key":"e_1_3_2_2_16_1","volume-title":"Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa.","author":"Kojima Takeshi","year":"2022","unstructured":"Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in neural information processing systems 35 (2022), 22199--22213."},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_2_2_18_1","unstructured":"Percy Liang Rishi Bommasani Tony Lee Dimitris Tsipras Dilara Soylu Michihiro Yasunaga Yian Zhang Deepak Narayanan Yuhuai Wu Ananya Kumar et al. 2022. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110 (2022)."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3463238"},{"key":"e_1_3_2_2_20_1","volume-title":"GPT understands, too. AI Open","author":"Liu Xiao","year":"2023","unstructured":"Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. 2023. GPT understands, too. AI Open (2023)."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.556"},{"key":"e_1_3_2_2_22_1","volume-title":"Zeroshot listwise document reranking with a large language model. arXiv preprint arXiv:2305.02156","author":"Ma Xueguang","year":"2023","unstructured":"Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zeroshot listwise document reranking with a large language model. arXiv preprint arXiv:2305.02156 (2023)."},{"key":"e_1_3_2_2_23_1","volume-title":"Sgpt: Gpt sentence embeddings for semantic search. 
arXiv preprint arXiv:2202.08904","author":"Muennighoff Niklas","year":"2022","unstructured":"Niklas Muennighoff. 2022. Sgpt: Gpt sentence embeddings for semantic search. arXiv preprint arXiv:2202.08904 (2022)."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.63"},{"key":"e_1_3_2_2_25_1","volume-title":"Multi-stage document ranking with BERT. arXiv preprint arXiv:1910.14424","author":"Nogueira Rodrigo","year":"2019","unstructured":"Rodrigo Nogueira, Wei Yang, Kyunghyun Cho, and Jimmy Lin. 2019. Multi-stage document ranking with BERT. arXiv preprint arXiv:1910.14424 (2019)."},{"key":"e_1_3_2_2_26_1","volume-title":"Show Your Work: Scratchpads for Intermediate Computation with Language Models. In Deep Learning for Code Workshop.","author":"Nye Maxwell","year":"2022","unstructured":"Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, et al. 2022. Show Your Work: Scratchpads for Intermediate Computation with Language Models. In Deep Learning for Code Workshop."},{"key":"e_1_3_2_2_27_1","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Ray et al. 2022. Training language models to follow instructions with human feedback. Advances in neural information processing systems 35 (2022) 27730--27744."},{"key":"e_1_3_2_2_28_1","volume-title":"RankZephyr: Effective and Robust Zero-Shot Listwise Reranking is a Breeze! arXiv preprint arXiv:2312.02724","author":"Pradeep Ronak","year":"2023","unstructured":"Ronak Pradeep, Sahel Sharifymoghaddam, and Jimmy Lin. 2023. RankZephyr: Effective and Robust Zero-Shot Listwise Reranking is a Breeze! 
arXiv preprint arXiv:2312.02724 (2023)."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.eacl-main.277"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.494"},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.410"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"crossref","unstructured":"Zhen Qin Rolf Jagerman Kai Hui Honglei Zhuang Junru Wu Jiaming Shen Tianqi Liu Jialu Liu Donald Metzler Xuanhui Wang et al. 2023. Large language models are effective text rankers with pairwise ranking prompting. arXiv preprint arXiv:2306.17563 (2023).","DOI":"10.18653\/v1\/2024.findings-naacl.97"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.466"},{"key":"e_1_3_2_2_34_1","volume-title":"Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems 36","author":"Rafailov Rafael","year":"2024","unstructured":"Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2024. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3411763.3451760"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.249"},{"key":"e_1_3_2_2_37_1","volume-title":"Multitask Prompted Training Enables Zero-Shot Task Generalization. In International Conference on Learning Representations.","author":"Sanh Victor","year":"2021","unstructured":"Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, et al. 2021. Multitask Prompted Training Enables Zero-Shot Task Generalization. 
In International Conference on Learning Representations."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.923"},{"key":"e_1_3_2_2_40_1","volume-title":"BEIR: A Heterogeneous Benchmark for Zero-shot Evaluation of Information Retrieval Models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2).","author":"Thakur Nandan","year":"2021","unstructured":"Nandan Thakur, Nils Reimers, Andreas R\u00fcckl\u00e9, Abhishek Srivastava, and Iryna Gurevych. 2021. BEIR: A Heterogeneous Benchmark for Zero-shot Evaluation of Information Retrieval Models. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2)."},{"key":"e_1_3_2_2_41_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.585"},{"key":"e_1_3_2_2_43_1","volume-title":"Self-Consistency Improves Chain of Thought Reasoning in Language Models. In The Eleventh International Conference on Learning Representations.","author":"Schuurmans Dale","year":"2022","unstructured":"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2022. Self-Consistency Improves Chain of Thought Reasoning in Language Models. 
In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.167"},{"key":"e_1_3_2_2_45_1","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems 35 (2022), 24824--24837.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1046"},{"key":"e_1_3_2_2_47_1","volume-title":"Towards open-world recommendation with knowledge augmentation from large language models. arXiv preprint arXiv:2306.10933","author":"Xi Yunjia","year":"2023","unstructured":"Yunjia Xi, Weiwen Liu, Jianghao Lin, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, Rui Zhang, and Yong Yu. 2023. Towards open-world recommendation with knowledge augmentation from large language models. arXiv preprint arXiv:2306.10933 (2023)."},{"key":"e_1_3_2_2_48_1","volume-title":"ReAct: Synergizing Reasoning and Acting in Language Models. In The Eleventh International Conference on Learning Representations.","author":"Yao Shunyu","year":"2022","unstructured":"Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. 2022. ReAct: Synergizing Reasoning and Acting in Language Models. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_2_49_1","volume-title":"Prompt engineering a prompt engineer. arXiv preprint arXiv:2311.05661","author":"Ye Qinyuan","year":"2023","unstructured":"Qinyuan Ye, Maxamed Axmed, Reid Pryzant, and Fereshte Khani. 2023. Prompt engineering a prompt engineer. 
arXiv preprint arXiv:2311.05661 (2023)."},{"key":"e_1_3_2_2_50_1","first-page":"27263","article-title":"Bartscore: Evaluating generated text as text generation","volume":"34","author":"Yuan Weizhe","year":"2021","unstructured":"Weizhe Yuan, Graham Neubig, and Pengfei Liu. 2021. Bartscore: Evaluating generated text as text generation. Advances in Neural Information Processing Systems 34 (2021), 27263--27277.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_51_1","volume-title":"International conference on machine learning. PMLR, 12697--12706","author":"Zhao Zihao","year":"2021","unstructured":"Zihao Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. 2021. Calibrate before use: Improving few-shot performance of language models. In International conference on machine learning. PMLR, 12697--12706."},{"key":"e_1_3_2_2_52_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Zhou Yongchao","year":"2022","unstructured":"Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2022. Large Language Models are Human-Level Prompt Engineers. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_2_53_1","volume-title":"Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107","author":"Zhu Yutao","year":"2023","unstructured":"Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. 
arXiv preprint arXiv:2308.07107 (2023)."},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3592047"}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3717574","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3701716.3717574","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T03:06:11Z","timestamp":1759892771000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3717574"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":54,"alternative-id":["10.1145\/3701716.3717574","10.1145\/3701716"],"URL":"https:\/\/doi.org\/10.1145\/3701716.3717574","relation":{},"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"2025-05-23","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}