{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,14]],"date-time":"2025-10-14T20:20:41Z","timestamp":1760473241988,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":54,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1145\/3589334.3645670","type":"proceedings-article","created":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T07:08:13Z","timestamp":1715152093000},"page":"4372-4382","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":12,"title":["Harnessing Multi-Role Capabilities of Large Language Models for Open-Domain Question Answering"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4850-6134","authenticated-orcid":false,"given":"Hongda","family":"Sun","sequence":"first","affiliation":[{"name":"Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-2154-7215","authenticated-orcid":false,"given":"Yuxuan","family":"Liu","sequence":"additional","affiliation":[{"name":"Nankai University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5018-3462","authenticated-orcid":false,"given":"Chengwei","family":"Wu","sequence":"additional","affiliation":[{"name":"Beijing Academy of Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-5619-7998","authenticated-orcid":false,"given":"Haiyu","family":"Yan","sequence":"additional","affiliation":[{"name":"Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4431-2978","authenticated-orcid":false,"given":"Cheng","family":"Tai","sequence":"additional","affiliation":[{"name":"Moqi, Inc., Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7108-3574","authenticated-orcid":false,"given":"Xin","family":"Gao","sequence":"additional","affiliation":[{"name":"King Abdullah University of Science and Technology, Thuwal, Saudi Arabia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1117-2890","authenticated-orcid":false,"given":"Shuo","family":"Shang","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3356-6823","authenticated-orcid":false,"given":"Rui","family":"Yan","sequence":"additional","affiliation":[{"name":"Gaoling School of Artificial Intelligence, Renmin University of China, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2024,5,13]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Promptsource: An integrated development environment and repository for natural language prompts. arXiv preprint arXiv:2202.01279","author":"Bach Stephen H","year":"2022","unstructured":"Stephen H Bach, Victor Sanh, Zheng-Xin Yong, Albert Webson, Colin Raffel, Nihal V Nayak, Abheesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, et al. 2022. Promptsource: An integrated development environment and repository for natural language prompts. 
arXiv preprint arXiv:2202.01279 (2022)."},{"key":"e_1_3_2_2_2_1","volume-title":"Jacob Eisenstein, Kuzman Ganchev, Jonathan Herzig, Kai Hui, et al.","author":"Bohnet Bernd","year":"2022","unstructured":"Bernd Bohnet, Vinh Q Tran, Pat Verga, Roee Aharoni, Daniel Andor, Livio Baldini Soares, Jacob Eisenstein, Kuzman Ganchev, Jonathan Herzig, Kai Hui, et al. 2022. Attributed question answering: Evaluation and modeling for attributed large language models. arXiv preprint arXiv:2212.08037 (2022)."},{"key":"e_1_3_2_2_3_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems Vol. 33 (2020) 1877--1901."},{"key":"e_1_3_2_2_4_1","volume-title":"Yuanzhi Li, Scott Lundberg, et al.","author":"Bubeck S\u00e9bastien","year":"2023","unstructured":"S\u00e9bastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712 (2023)."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1171"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.240"},{"key":"e_1_3_2_2_7_1","volume-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality. See https:\/\/vicuna. lmsys. org (accessed","author":"Chiang Wei-Lin","year":"2023","unstructured":"Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023. Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality. See https:\/\/vicuna. lmsys. org (accessed 14 April 2023) (2023)."},{"key":"e_1_3_2_2_8_1","volume-title":"Charles Sutton, Sebastian Gehrmann, et al.","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)."},{"key":"e_1_3_2_2_9_1","volume-title":"Query Reranking for Open-Domain Question Answering. arXiv preprint arXiv:2305.17080","author":"Chuang Yung-Sung","year":"2023","unstructured":"Yung-Sung Chuang, Wei Fang, Shang-Wen Li, Wen-tau Yih, and James Glass. 2023. Expand, Rerank, and Retrieve: Query Reranking for Open-Domain Question Answering. arXiv preprint arXiv:2305.17080 (2023)."},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1109"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3289600.3290992"},{"key":"e_1_3_2_2_12_1","volume-title":"Confucius: Iterative Tool Learning from Introspection Feedback by Easy-to-Difficult Curriculum. In AAAI.","author":"Gao Shen","year":"2024","unstructured":"Shen Gao, Zhengliang Shi, Minghang Zhu, Bowen Fang, Xin Xin, Pengjie Ren, Zhumin Chen, Jun Ma, and Zhaochun Ren. 2024. Confucius: Iterative Tool Learning from Introspection Feedback by Easy-to-Difficult Curriculum. In AAAI."},{"key":"e_1_3_2_2_13_1","volume-title":"Making pre-trained language models better few-shot learners. arXiv preprint arXiv:2012.15723","author":"Gao Tianyu","year":"2020","unstructured":"Tianyu Gao, Adam Fisch, and Danqi Chen. 2020. 
Making pre-trained language models better few-shot learners. arXiv preprint arXiv:2012.15723 (2020)."},{"key":"e_1_3_2_2_14_1","volume-title":"International conference on machine learning. PMLR, 3929--3938","author":"Guu Kelvin","year":"2020","unstructured":"Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. 2020. Retrieval augmented language model pre-training. In International conference on machine learning. PMLR, 3929--3938."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","unstructured":"Gautier Izacard Mathilde Caron Lucas Hosseini Sebastian Riedel Piotr Bojanowski Armand Joulin and Edouard Grave. 2021. Unsupervised Dense Information Retrieval with Contrastive Learning. https:\/\/doi.org\/10.48550\/ARXIV.2112.09118","DOI":"10.48550\/ARXIV.2112.09118"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.74"},{"key":"e_1_3_2_2_17_1","volume-title":"Charles LA Clarke, and Davood Rafiei","author":"Kamalloo Ehsan","year":"2023","unstructured":"Ehsan Kamalloo, Nouha Dziri, Charles LA Clarke, and Davood Rafiei. 2023. Evaluating Open-Domain Question Answering in the Era of Large Language Models. arXiv preprint arXiv:2305.06984 (2023)."},{"key":"e_1_3_2_2_18_1","volume-title":"Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.","author":"Karpukhin Vladimir","year":"2020","unstructured":"Vladimir Karpukhin, Barlas Oug uz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. arXiv preprint arXiv:2004.04906 (2020)."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00405"},{"key":"e_1_3_2_2_20_1","volume-title":"Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300","author":"Lee Kenton","year":"2019","unstructured":"Kenton Lee, Ming-Wei Chang, and Kristina Toutanova. 2019. Latent retrieval for weakly supervised open domain question answering. arXiv preprint arXiv:1906.00300 (2019)."},{"key":"e_1_3_2_2_21_1","first-page":"9459","article-title":"Retrieval-augmented generation for knowledge-intensive nlp tasks","volume":"33","author":"Lewis Patrick","year":"2020","unstructured":"Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\"aschel, et al. 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems , Vol. 33 (2020), 9459--9474.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"e_1_3_2_2_23_1","volume-title":"Zhengxiao Du, Zhilin Yang, and Jie Tang.","author":"Liu Xiao","year":"2021","unstructured":"Xiao Liu, Kaixuan Ji, Yicheng Fu, Weng Lam Tam, Zhengxiao Du, Zhilin Yang, and Jie Tang. 2021a. P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks. arXiv preprint arXiv:2110.07602 (2021)."},{"key":"e_1_3_2_2_24_1","volume-title":"GPT understands, too. arXiv preprint arXiv:2103.10385","author":"Liu Xiao","year":"2021","unstructured":"Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. 2021b. GPT understands, too. arXiv preprint arXiv:2103.10385 (2021)."},{"key":"e_1_3_2_2_25_1","volume-title":"2023 b. Zero-Shot Listwise Document Reranking with a Large Language Model. 
arXiv preprint arXiv:2305.02156","author":"Ma Xueguang","year":"2023","unstructured":"Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023 b. Zero-Shot Listwise Document Reranking with a Large Language Model. arXiv preprint arXiv:2305.02156 (2023)."},{"key":"e_1_3_2_2_26_1","volume-title":"2023 a. Large language model is not a good few-shot information extractor, but a good reranker for hard samples! arXiv preprint arXiv:2303.08559","author":"Ma Yubo","year":"2023","unstructured":"Yubo Ma, Yixin Cao, YongChing Hong, and Aixin Sun. 2023 a. Large language model is not a good few-shot information extractor, but a good reranker for hard samples! arXiv preprint arXiv:2303.08559 (2023)."},{"key":"e_1_3_2_2_27_1","volume-title":"Self-refine: Iterative refinement with self-feedback. arXiv preprint arXiv:2303.17651","author":"Madaan Aman","year":"2023","unstructured":"Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. 2023. Self-refine: Iterative refinement with self-feedback. arXiv preprint arXiv:2303.17651 (2023)."},{"key":"e_1_3_2_2_28_1","volume-title":"Generation-augmented retrieval for open-domain question answering. arXiv preprint arXiv:2009.08553","author":"Mao Yuning","year":"2020","unstructured":"Yuning Mao, Pengcheng He, Xiaodong Liu, Yelong Shen, Jianfeng Gao, Jiawei Han, and Weizhu Chen. 2020. Generation-augmented retrieval for open-domain question answering. arXiv preprint arXiv:2009.08553 (2020)."},{"key":"e_1_3_2_2_29_1","unstructured":"Sewon Min Jordan Boyd-Graber Chris Alberti Danqi Chen Eunsol Choi Michael Collins Kelvin Guu Hannaneh Hajishirzi Kenton Lee Jennimaria Palomaki et al. 2021. Neurips 2020 efficientqa competition: Systems analyses and lessons learned. In NeurIPS 2020 Competition and Demonstration Track. PMLR 86--111."},{"key":"e_1_3_2_2_30_1","volume-title":"Document expansion by query prediction. arXiv preprint arXiv:1904.08375","author":"Nogueira Rodrigo","year":"2019","unstructured":"Rodrigo Nogueira, Wei Yang, Jimmy Lin, and Kyunghyun Cho. 2019. Document expansion by query prediction. arXiv preprint arXiv:1904.08375 (2019)."},{"key":"e_1_3_2_2_31_1","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang Long","year":"2022","unstructured":"Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems , Vol. 35 (2022), 27730--27744.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_32_1","volume-title":"James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al.","author":"Petroni Fabio","year":"2020","unstructured":"Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. KILT: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252 (2020)."},{"key":"e_1_3_2_2_33_1","volume-title":"Language models as knowledge bases? arXiv preprint arXiv:1909.01066","author":"Petroni Fabio","year":"2019","unstructured":"Fabio Petroni, Tim Rockt\"aschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H Miller, and Sebastian Riedel. 2019. Language models as knowledge bases? 
arXiv preprint arXiv:1909.01066 (2019)."},{"key":"e_1_3_2_2_34_1","volume-title":"Is ChatGPT a general-purpose natural language processing task solver? arXiv preprint arXiv:2302.06476","author":"Qin Chengwei","year":"2023","unstructured":"Chengwei Qin, Aston Zhang, Zhuosheng Zhang, Jiaao Chen, Michihiro Yasunaga, and Diyi Yang. 2023. Is ChatGPT a general-purpose natural language processing task solver? arXiv preprint arXiv:2302.06476 (2023)."},{"key":"e_1_3_2_2_35_1","volume-title":"Daxiang Dong, Hua Wu, and Haifeng Wang.","author":"Qu Yingqi","year":"2020","unstructured":"Yingqi Qu, Yuchen Ding, Jing Liu, Kai Liu, Ruiyang Ren, Wayne Xin Zhao, Daxiang Dong, Hua Wu, and Haifeng Wang. 2020. RocketQA: An optimized training approach to dense passage retrieval for open-domain question answering. arXiv preprint arXiv:2010.08191 (2020)."},{"key":"e_1_3_2_2_36_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog Vol. 1 8 (2019) 9."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.5555\/3455716.3455856"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1145\/3411763.3451760"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.437"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.1561\/1500000019"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00564"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.346"},{"key":"e_1_3_2_2_43_1","volume-title":"Reflexion: Language Agents with Verbal Reinforcement Learning. arXiv preprint arXiv:2303.11366","author":"Shinn Noah","year":"2023","unstructured":"Noah Shinn, Federico Cassano, Beck Labash, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language Agents with Verbal Reinforcement Learning. arXiv preprint arXiv:2303.11366 (2023)."},{"key":"e_1_3_2_2_44_1","volume-title":"Deep Language Networks: Joint Prompt Training of Stacked LLMs using Variational Inference. arXiv preprint arXiv:2306.12509","author":"Sordoni Alessandro","year":"2023","unstructured":"Alessandro Sordoni, Xingdi Yuan, Marc-Alexandre C\u00f4t\u00e9 , Matheus Pereira, Adam Trischler, Ziang Xiao, Arian Hosseini, Friederike Niedtner, and Nicolas Le Roux. 2023. Deep Language Networks: Joint Prompt Training of Stacked LLMs using Variational Inference. arXiv preprint arXiv:2306.12509 (2023)."},{"key":"e_1_3_2_2_45_1","volume-title":"2023 a. BeamSearchQA: Large Language Models are Strong Zero-Shot QA Solver. arXiv preprint arXiv:2305.14766","author":"Sun Hao","year":"2023","unstructured":"Hao Sun, Xiao Liu, Yeyun Gong, Yan Zhang, and Nan Duan. 2023 a. BeamSearchQA: Large Language Models are Strong Zero-Shot QA Solver. arXiv preprint arXiv:2305.14766 (2023)."},{"key":"e_1_3_2_2_46_1","volume-title":"2023 b. From Indeterminacy to Determinacy: Augmenting Logical Reasoning Capabilities with Large Language Models. arXiv preprint arXiv:2310.18659","author":"Sun Hongda","year":"2023","unstructured":"Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan. 2023 b. From Indeterminacy to Determinacy: Augmenting Logical Reasoning Capabilities with Large Language Models. arXiv preprint arXiv:2310.18659 (2023)."},{"key":"e_1_3_2_2_47_1","volume-title":"Evaluating open question answering evaluation. 
arXiv preprint arXiv:2305.12421","author":"Wang Cunxiang","year":"2023","unstructured":"Cunxiang Wang, Sirui Cheng, Zhikun Xu, Bowen Ding, Yidong Wang, and Yue Zhang. 2023. Evaluating open question answering evaluation. arXiv preprint arXiv:2305.12421 (2023)."},{"key":"e_1_3_2_2_48_1","volume-title":"Large language models are reasoners with self-verification. arXiv preprint arXiv:2212.09561","author":"Weng Yixuan","year":"2022","unstructured":"Yixuan Weng, Minjun Zhu, Shizhu He, Kang Liu, and Jun Zhao. 2022. Large language models are reasoners with self-verification. arXiv preprint arXiv:2212.09561 (2022)."},{"key":"e_1_3_2_2_49_1","volume-title":"International Conference for Learning Representation (ICLR).","author":"Yu Wenhao","year":"2023","unstructured":"Wenhao Yu, Dan Iter, Shuohang Wang, Yichong Xu, Mingxuan Ju, Soumya Sanyal, Chenguang Zhu, Michael Zeng, and Meng Jiang. 2023. Generate rather than retrieve: Large language models are strong context generators. In International Conference for Learning Representation (ICLR)."},{"key":"e_1_3_2_2_50_1","first-page":"27263","article-title":"Bartscore: Evaluating generated text as text generation","volume":"34","author":"Yuan Weizhe","year":"2021","unstructured":"Weizhe Yuan, Graham Neubig, and Pengfei Liu. 2021. Bartscore: Evaluating generated text as text generation. Advances in Neural Information Processing Systems , Vol. 34 (2021), 27263--27277.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_51_1","volume-title":"Automatic evaluation of attribution by large language models. arXiv preprint arXiv:2305.06311","author":"Yue Xiang","year":"2023","unstructured":"Xiang Yue, Boshi Wang, Kai Zhang, Ziru Chen, Yu Su, and Huan Sun. 2023. Automatic evaluation of attribution by large language models. arXiv preprint arXiv:2305.06311 (2023)."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"crossref","unstructured":"Ce Zhou Qian Li Chen Li Jun Yu Yixin Liu Guangjing Wang Kai Zhang Cheng Ji Qiben Yan Lifang He et al. 2023. A comprehensive survey on pretrained foundation models: A history from bert to chatgpt. arXiv preprint arXiv:2302.09419 (2023).","DOI":"10.1007\/s13042-024-02443-6"},{"key":"e_1_3_2_2_53_1","volume-title":"Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba.","author":"Zhou Yongchao","year":"2022","unstructured":"Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2022. Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910 (2022)."},{"key":"e_1_3_2_2_54_1","volume-title":"Retrieving and reading: A comprehensive survey on open-domain question answering. arXiv preprint arXiv:2101.00774","author":"Zhu Fengbin","year":"2021","unstructured":"Fengbin Zhu, Wenqiang Lei, Chao Wang, Jianming Zheng, Soujanya Poria, and Tat-Seng Chua. 2021. Retrieving and reading: A comprehensive survey on open-domain question answering. arXiv preprint arXiv:2101.00774 (2021). 
io"}],"event":{"name":"WWW '24: The ACM Web Conference 2024","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Singapore Singapore","acronym":"WWW '24"},"container-title":["Proceedings of the ACM Web Conference 2024"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589334.3645670","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3589334.3645670","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:27:38Z","timestamp":1755822458000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3589334.3645670"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":54,"alternative-id":["10.1145\/3589334.3645670","10.1145\/3589334"],"URL":"https:\/\/doi.org\/10.1145\/3589334.3645670","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]},"assertion":[{"value":"2024-05-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}