{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T23:27:08Z","timestamp":1771025228806,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":45,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,7,10]],"date-time":"2024-07-10T00:00:00Z","timestamp":1720569600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Shanghai Municipal Science and Technology Major Project","award":["2021SHZDZX0102"],"award-info":[{"award-number":["2021SHZDZX0102"]}]},{"DOI":"10.13039\/501100006374","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62322603, 62076161"],"award-info":[{"award-number":["62322603, 62076161"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,7,10]]},"DOI":"10.1145\/3626772.3657788","type":"proceedings-article","created":{"date-parts":[[2024,7,11]],"date-time":"2024-07-11T12:40:05Z","timestamp":1720701605000},"page":"3-13","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["TRAD: Enhancing LLM Agents with Step-Wise Thought Retrieval and Aligned Decision"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-9881-3249","authenticated-orcid":false,"given":"Ruiwen","family":"Zhou","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0494-7440","authenticated-orcid":false,"given":"Yingxuan","family":"Yang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-7868-1262","authenticated-orcid":false,"given":"Muning","family":"Wen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1247-2382","authenticated-orcid":false,"given":"Ying","family":"Wen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3199-2618","authenticated-orcid":false,"given":"Wenhao","family":"Wang","sequence":"additional","affiliation":[{"name":"China Pacific Insurance, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-9776-5513","authenticated-orcid":false,"given":"Chunling","family":"Xi","sequence":"additional","affiliation":[{"name":"China Pacific Insurance, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7711-8247","authenticated-orcid":false,"given":"Guoqiang","family":"Xu","sequence":"additional","affiliation":[{"name":"China Pacific Insurance, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0281-8271","authenticated-orcid":false,"given":"Yong","family":"Yu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0127-2425","authenticated-orcid":false,"given":"Weinan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2024,7,11]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Constructions Aeronautiques Adele Howe Craig Knoblock ISI Drew McDermott Ashwin Ram Manuela Veloso Daniel Weld David Wilkins SRI Anthony Barrett Dave Christianson et al. 1998. Pddl| the planning domain definition language. Technical Report (1998)."},{"key":"e_1_3_2_1_2_1","volume-title":"Graph of Thoughts: Solving Elaborate Problems with Large Language Models. 
arXiv preprint arXiv:2308.09687","author":"Besta Maciej","year":"2023","unstructured":"Maciej Besta, Nils Blach, Ales Kubicek, Robert Gerstenberger, Lukas Gianinazzi, Joanna Gajda, Tomasz Lehmann, Michal Podstawski, Hubert Niewiadomski, Piotr Nyczyk, and Torsten Hoefler. 2023. Graph of Thoughts: Solving Elaborate Problems with Large Language Models. arXiv preprint arXiv:2308.09687 (2023)."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.5555\/3495724.3495883"},{"key":"e_1_3_2_1_4_1","volume-title":"Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS).","author":"Deng Xiang","year":"2023","unstructured":"Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Samuel Stevens, Boshi Wang, Huan Sun, and Yu Su. 2023. Mind2Web: Towards a Generalist Agent for the Web. In Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_5_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_6_1","volume-title":"Everything of thoughts: Defying the law of penrose triangle for thought generation. arXiv preprint arXiv:2311.04254","author":"Ding Ruomeng","year":"2023","unstructured":"Ruomeng Ding, Chaoyun Zhang, Lu Wang, Yong Xu, Minghua Ma, Wei Zhang, Si Qin, Saravan Rajmohan, Qingwei Lin, and Dongmei Zhang. 2023. Everything of thoughts: Defying the law of penrose triangle for thought generation. 
arXiv preprint arXiv:2311.04254 (2023)."},{"key":"e_1_3_2_1_7_1","volume-title":"Proceedings of The 12th International Conference on Learning Representations (ICLR).","author":"Gur Izzeddin","year":"2024","unstructured":"Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. 2024. A Real-World WebAgent with Planning, Long Context Understanding, and Program Synthesis. In Proceedings of The 12th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_8_1","volume-title":"Aakanksha Chowdhery, Sharan Narang, Noah Fiedel, and Aleksandra Faust.","author":"Gur Izzeddin","year":"2023","unstructured":"Izzeddin Gur, Ofir Nachum, Yingjie Miao, Mustafa Safdari, Austin Huang, Aakanksha Chowdhery, Sharan Narang, Noah Fiedel, and Aleksandra Faust. 2023. Understanding HTML with Large Language Models. In Findings of the Association for Computational Linguistics (EMNLP). 2803--2821."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.507"},{"key":"e_1_3_2_1_10_1","volume-title":"Proceedings of the 8th International Conference on Learning Representations (ICLR).","author":"Holtzman Ari","year":"2020","unstructured":"Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The Curious Case of Neural Text Degeneration. In Proceedings of the 8th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.550"},{"key":"e_1_3_2_1_12_1","volume-title":"Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS).","author":"Kim Geunwoo","year":"2023","unstructured":"Geunwoo Kim, Pierre Baldi, and Stephen McAleer. 2023. Language Models can Solve Computer Tasks. 
In Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"e_1_3_2_1_14_1","volume-title":"LLMP: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477","author":"Liu Bo","year":"2023","unstructured":"Bo Liu, Yuqian Jiang, Xiaohan Zhang, Qiang Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. 2023. LLMP: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477 (2023)."},{"key":"e_1_3_2_1_15_1","volume-title":"What Makes Good In-Context Examples for GPT-3? arXiv preprint arXiv:2101.06804","author":"Liu Jiachang","year":"2021","unstructured":"Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2021. What Makes Good In-Context Examples for GPT-3? arXiv preprint arXiv:2101.06804 (2021)."},{"key":"e_1_3_2_1_16_1","volume-title":"Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332","author":"Nakano Reiichiro","year":"2021","unstructured":"Reiichiro Nakano, Jacob Hilton, Suchir Balaji, Jeff Wu, Long Ouyang, Christina Kim, Christopher Hesse, Shantanu Jain, Vineet Kosaraju, William Saunders, et al. 2021. Webgpt: Browser-assisted question-answering with human feedback. arXiv preprint arXiv:2112.09332 (2021)."},{"key":"e_1_3_2_1_17_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.5555\/3600270.3602281"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3586183.3606763"},{"key":"e_1_3_2_1_20_1","volume-title":"Language models are unsupervised multitask learners. OpenAI Blog","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. 
Language models are unsupervised multitask learners. OpenAI Blog (2019)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"e_1_3_2_1_22_1","volume-title":"Yossi Adi, Jingyu Liu, Tal Remez, J\u00e9r\u00e9my Rapin, et al.","author":"Roziere Baptiste","year":"2023","unstructured":"Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, J\u00e9r\u00e9my Rapin, et al. 2023. Code llama: Open foundation models for code. arXiv preprint arXiv:2308.12950 (2023)."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.191"},{"key":"e_1_3_2_1_24_1","volume-title":"Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom.","author":"Schick Timo","year":"2023","unstructured":"Timo Schick, Jane Dwivedi-Yu, Roberto Dess`i, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. In Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_25_1","volume-title":"Proceedings of the 34th International Conference on Machine Learning (ICML)","volume":"70","author":"Shi Tianlin","year":"2017","unstructured":"Tianlin Shi, Andrej Karpathy, Linxi Fan, Jonathan Hernandez, and Percy Liang. 2017. World of Bits: An Open-Domain Platform for Web-Based Agents. In Proceedings of the 34th International Conference on Machine Learning (ICML), Vol. 70. 3135--3144."},{"key":"e_1_3_2_1_26_1","volume-title":"Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS).","author":"Shinn Noah","year":"2023","unstructured":"Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik R Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. 
In Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01075"},{"key":"e_1_3_2_1_28_1","volume":"202","author":"Shridhar Mohit","unstructured":"Mohit Shridhar, Xingdi Yuan, Marc-Alexandre C\u00f4 t\u00e9 , Yonatan Bisk, Adam Trischler, and Matthew J. Hausknecht. 2021. ALFWorld: Aligning Text and Embodied Environments for Interactive Learning. In Proceedings of 9th International Conference on Learning Representations (ICLR).","journal-title":"Matthew J. Hausknecht."},{"key":"e_1_3_2_1_29_1","unstructured":"The LongChat Team. 2023. How Long Can Open-Source LLMs Truly Promise on Context Length? https:\/\/lmsys.org\/blog\/2023-06--29-longchat\/"},{"key":"e_1_3_2_1_30_1","volume-title":"LLaMA: Open and Efficient Foundation Language Models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023. LLaMA: Open and Efficient Foundation Language Models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.557"},{"key":"e_1_3_2_1_32_1","volume-title":"2023 d. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291","author":"Wang Guanzhi","year":"2023","unstructured":"Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023 d. Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291 (2023)."},{"key":"e_1_3_2_1_33_1","volume-title":"2023 b. A survey on large language model based autonomous agents. 
arXiv preprint arXiv:2308.11432","author":"Wang Lei","year":"2023","unstructured":"Lei Wang, Chen Ma, Xueyang Feng, Zeyu Zhang, Hao Yang, Jingsen Zhang, Zhiyuan Chen, Jiakai Tang, Xu Chen, Yankai Lin, et al. 2023 b. A survey on large language model based autonomous agents. arXiv preprint arXiv:2308.11432 (2023)."},{"key":"e_1_3_2_1_34_1","volume-title":"Self-Consistency Improves Chain of Thought Reasoning in Language Models. In The 11th International Conference on Learning Representations, (ICLR).","author":"Wang Xuezhi","year":"2023","unstructured":"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023 c. Self-Consistency Improves Chain of Thought Reasoning in Language Models. In The 11th International Conference on Learning Representations, (ICLR)."},{"key":"e_1_3_2_1_35_1","volume-title":"Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS).","author":"Wang Zihao","year":"2023","unstructured":"Zihao Wang, Shaofei Cai, Anji Liu, Xiaojian Ma, and Yitao Liang. 2023 a. Describe, explain, plan and select: Interactive planning with large language models enables open-world multi-task agents. In Proceedings of the 37th Advances in Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.5555\/3600270.3602070"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.79"},{"key":"e_1_3_2_1_38_1","volume-title":"Proceedings of 36th Conference on Neural Information Processing Systems (NeurIPS).","author":"Yao Shunyu","year":"2022","unstructured":"Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. 2022. WebShop: Towards Scalable Real-World Web Interaction with Grounded Language Agents. 
In Proceedings of 36th Conference on Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_39_1","volume-title":"Proceedings of 37th Conference on Neural Information Processing Systems (NeurIPS).","author":"Yao Shunyu","year":"2023","unstructured":"Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. 2023 a. Tree of Thoughts: Deliberate Problem Solving with Large Language Models. In Proceedings of 37th Conference on Neural Information Processing Systems (NeurIPS)."},{"key":"e_1_3_2_1_40_1","volume-title":"Proceedings of The 11th International Conference on Learning Representations (ICLR).","author":"Yao Shunyu","year":"2023","unstructured":"Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. 2023 b. ReAct: Synergizing Reasoning and Acting in Language Models. In Proceedings of The 11th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.622"},{"key":"e_1_3_2_1_42_1","volume-title":"Proceedings of The 12th International Conference on Learning Representations (ICLR).","author":"Zheng Huaixiu Steven","year":"2024","unstructured":"Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, Heng-Tze Cheng, Ed H. Chi, Quoc V Le, and Denny Zhou. 2024 a. Step-Back Prompting Enables Reasoning Via Abstraction in Large Language Models. In Proceedings of The 12th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_43_1","volume-title":"Proceedings of 12th International Conference on Learning Representations (ICLR).","author":"Zheng Longtao","year":"2024","unstructured":"Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. 2024 b. Synapse: Trajectory-as-Exemplar Prompting with Memory for Computer Control. 
In Proceedings of 12th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_44_1","volume-title":"Least-to-Most Prompting Enables Complex Reasoning in Large Language Models. In The 11th International Conference on Learning Representations (ICLR).","author":"Zhou Denny","unstructured":"Denny Zhou, Nathanael Sch\u00e4rli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc V. Le, and Ed H. Chi. 2023. Least-to-Most Prompting Enables Complex Reasoning in Large Language Models. In The 11th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_45_1","volume-title":"Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107","author":"Zhu Yutao","year":"2023","unstructured":"Yutao Zhu, Huaying Yuan, Shuting Wang, Jiongnan Liu, Wenhan Liu, Chenlong Deng, Zhicheng Dou, and Ji-Rong Wen. 2023. Large language models for information retrieval: A survey. arXiv preprint arXiv:2308.07107 (2023)."}],"event":{"name":"SIGIR 2024: The 47th International ACM SIGIR Conference on Research and Development in Information Retrieval","location":"Washington DC USA","acronym":"SIGIR 2024","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626772.3657788","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3626772.3657788","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:41:47Z","timestamp":1755841307000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3626772.3657788"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,10]]},"references-count":45,"alternative-id":["10.1145\/3626772.3657788","10.1145\/3626772"],"URL":"https:\/\/doi.org\/10.1145\/3626772.3657788","relation":{},"subject":[],"published":{"date-parts":[[2024,7,10]]},"assertion":[{"value":"2024-07-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}