{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T07:09:28Z","timestamp":1776150568332,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":34,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,11,25]],"date-time":"2023-11-25T00:00:00Z","timestamp":1700870400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,11,27]]},"DOI":"10.1145\/3604237.3626866","type":"proceedings-article","created":{"date-parts":[[2023,11,25]],"date-time":"2023-11-25T18:09:47Z","timestamp":1700935787000},"page":"349-356","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":137,"title":["Enhancing Financial Sentiment Analysis via Retrieval Augmented Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8596-878X","authenticated-orcid":false,"given":"Boyu","family":"Zhang","sequence":"first","affiliation":[{"name":"The University of Adelaide, AU"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8168-5264","authenticated-orcid":false,"given":"Hongyang","family":"Yang","sequence":"additional","affiliation":[{"name":"Columbia University, US"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4978-2491","authenticated-orcid":false,"given":"Tianyu","family":"Zhou","sequence":"additional","affiliation":[{"name":"Brown University, US"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9696-3626","authenticated-orcid":false,"given":"Muhammad","family":"Ali Babar","sequence":"additional","affiliation":[{"name":"The University of Adelaide, AU"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9532-1709","authenticated-orcid":false,"given":"Xiao-Yang","family":"Liu","sequence":"additional","affiliation":[{"name":"Columbia University, US"}]}],"member":"320","published-online":{"date-parts":[[2023,11,25]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Dogu Araci. 2019. FinBERT: Financial sentiment analysis with pre-trained language models. In arXiv preprint arXiv:1908.10063."},{"key":"e_1_3_2_1_2_1","volume-title":"Language models are few-shot learners. Advances in neural information processing systems 33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared\u00a0D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877\u20131901."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3532682"},{"key":"e_1_3_2_1_4_1","volume-title":"Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90%* ChatGPT Quality.","author":"Chiang Wei-Lin","year":"2023","unstructured":"Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph\u00a0E. Gonzalez, Ion Stoica, and Eric\u00a0P. Xing. 2023. Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90%* ChatGPT Quality."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/ASONAM.2016.7752381"},{"key":"e_1_3_2_1_6_1","unstructured":"Gartner Glossary. 2023. Definition of Sentiment Analysis - Finance Glossary - Gartner."},{"key":"e_1_3_2_1_7_1","first-page":"9459","article-title":"Retrieval-augmented generation for knowledge-intensive nlp tasks","volume":"33","author":"Lewis Patrick","year":"2020","unstructured":"Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich K\u00fcttler, Mike Lewis, Wen-tau Yih, Tim Rockt\u00e4schel, 2020. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems 33 (2020), 9459\u20139474.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_8_1","volume-title":"Retrieval-augmented generation for code summarization via hybrid gnn. arXiv preprint arXiv:2006.05405","author":"Liu Shangqing","year":"2020","unstructured":"Shangqing Liu, Yu Chen, Xiaofei Xie, Jingkai Siow, and Yang Liu. 2020. Retrieval-augmented generation for code summarization via hybrid gnn. arXiv preprint arXiv:2006.05405 (2020)."},{"key":"e_1_3_2_1_9_1","volume-title":"Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101 (2017)."},{"key":"e_1_3_2_1_10_1","volume-title":"Is prompt all you need? no. A comprehensive and broader view of instruction learning. arXiv preprint arXiv:2303.10475","author":"Lou Renze","year":"2023","unstructured":"Renze Lou, Kai Zhang, and Wenpeng Yin. 2023. Is prompt all you need? no. A comprehensive and broader view of instruction learning. arXiv preprint arXiv:2303.10475 (2023)."},{"key":"e_1_3_2_1_11_1","unstructured":"Neural Magic. 2022. Twitter Financial News Sentiment. http:\/\/precog.iiitd.edu.in\/people\/anupama."},{"key":"e_1_3_2_1_12_1","volume-title":"International World Wide Web Conferences Steering Committee","author":"Maia Macedo","year":"2018","unstructured":"Macedo Maia, Siegfried Handschuh, Andre Freitas, Brian Davis, Ross McDermott, Manel Zarrouk, and Alexandra Balahur. 2018. WWW \u201918: Companion Proceedings of the The Web Conference 2018. In International World Wide Web Conferences Steering Committee (Lyon, France). Republic and Canton of Geneva, CHE."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1002\/asi.23062"},{"key":"e_1_3_2_1_14_1","volume-title":"Generation-augmented retrieval for open-domain question answering. arXiv preprint arXiv:2009.08553","author":"Mao Yuning","year":"2020","unstructured":"Yuning Mao, Pengcheng He, Xiaodong Liu, Yelong Shen, Jianfeng Gao, Jiawei Han, and Weizhu Chen. 2020. Generation-augmented retrieval for open-domain question answering. arXiv preprint arXiv:2009.08553 (2020)."},{"key":"e_1_3_2_1_15_1","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang Long","year":"2022","unstructured":"Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems 35 (2022), 27730\u201327744.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_16_1","volume-title":"Retrieval augmented code generation and summarization. arXiv preprint arXiv:2108.11601","author":"Parvez Md\u00a0Rizwan","year":"2021","unstructured":"Md\u00a0Rizwan Parvez, Wasi\u00a0Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. 2021. Retrieval augmented code generation and summarization. arXiv preprint arXiv:2108.11601 (2021)."},{"key":"e_1_3_2_1_17_1","volume-title":"DeepSpeed: System Optimizations Enable Training Deep Learning Models with Over 100 Billion Parameters","author":"Rasley Jeff","unstructured":"Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 2020. DeepSpeed: System Optimizations Enable Training Deep Learning Models with Over 100 Billion Parameters. In Association for Computing Machinery (Virtual Event, CA, USA) (KDD \u201920). New York, NY, USA, 3505\u20133506."},{"key":"e_1_3_2_1_18_1","volume-title":"Workshop on Mining Data for Financial Applications. Springer, 77\u201391","author":"Rawte Vipula","year":"2020","unstructured":"Vipula Rawte, Aparna Gupta, and Mohammed\u00a0J Zaki. 2020. A comparative analysis of temporal long text similarity: Application to financial documents. In Workshop on Mining Data for Financial Applications. Springer, 77\u201391."},{"key":"e_1_3_2_1_19_1","volume-title":"Multitask prompted training enables zero-shot task generalization. arXiv preprint arXiv:2110.08207","author":"Sanh Victor","year":"2021","unstructured":"Victor Sanh, Albert Webson, Colin Raffel, Stephen\u00a0H Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Teven\u00a0Le Scao, Arun Raja, 2021. Multitask prompted training enables zero-shot task generalization. arXiv preprint arXiv:2110.08207 (2021)."},{"key":"e_1_3_2_1_20_1","volume-title":"Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909","author":"Sennrich Rico","year":"2015","unstructured":"Rico Sennrich, Barry Haddow, and Alexandra Birch. 2015. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909 (2015)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-017-0111-6"},{"key":"e_1_3_2_1_22_1","volume-title":"Stanford Alpaca: An Instruction-following LLaMA model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca.","author":"Taori Rohan","year":"2023","unstructured":"Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori\u00a0B. Hashimoto. 2023. Stanford Alpaca: An Instruction-following LLaMA model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca."},{"key":"e_1_3_2_1_23_1","volume-title":"Lamda: Language models for dialog applications. arXiv preprint arXiv:2201.08239","author":"Thoppilan Romal","year":"2022","unstructured":"Romal Thoppilan, Daniel De\u00a0Freitas, Jamie Hall, Noam Shazeer, Apoorv Kulshreshtha, Heng-Tze Cheng, Alicia Jin, Taylor Bos, Leslie Baker, Yu Du, 2022. Lamda: Language models for dialog applications. arXiv preprint arXiv:2201.08239 (2022)."},{"key":"e_1_3_2_1_24_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"crossref","unstructured":"M.K. Vijaymeena and K. Kavitha. 2016. A Survey on Similarity Measures in Text Mining. Machine Learning and Applications (2016).","DOI":"10.5121\/mlaij.2016.3103"},{"key":"e_1_3_2_1_26_1","volume-title":"Self-instruct: Aligning language model with self generated instructions. arXiv preprint arXiv:2212.10560","author":"Wang Yizhong","year":"2022","unstructured":"Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah\u00a0A Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2022. Self-instruct: Aligning language model with self generated instructions. arXiv preprint arXiv:2212.10560 (2022)."},{"key":"e_1_3_2_1_27_1","volume-title":"International Conference on Learning Representations.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams\u00a0Wei Yu, Brian Lester, Nan Du, Andrew\u00a0M Dai, and Quoc\u00a0V Le. 2022. Finetuned Language Models are Zero-Shot Learners. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_28_1","volume-title":"Generalization of backpropagation with application to a recurrent gas market model. Neural networks 1, 4","author":"Werbos J","year":"1988","unstructured":"Paul\u00a0J Werbos. 1988. Generalization of backpropagation with application to a recurrent gas market model. Neural networks 1, 4 (1988), 339\u2013356."},{"key":"e_1_3_2_1_29_1","volume-title":"BloombergGPT: A large language model for finance. arXiv preprint arXiv:2303.17564","author":"Wu Shijie","year":"2023","unstructured":"Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David Rosenberg, and Gideon Mann. 2023. BloombergGPT: A large language model for finance. arXiv preprint arXiv:2303.17564 (2023)."},{"key":"e_1_3_2_1_30_1","volume-title":"FinGPT: Open-Source Financial Large Language Models. arXiv preprint arXiv:2306.06031","author":"Yang Hongyang","year":"2023","unstructured":"Hongyang Yang, Xiao-Yang Liu, and Christina\u00a0Dan Wang. 2023. FinGPT: Open-Source Financial Large Language Models. arXiv preprint arXiv:2306.06031 (2023)."},{"key":"e_1_3_2_1_31_1","volume-title":"Mark Christopher\u00a0Siy Uy, and Allen Huang","author":"Yang Yi","year":"2020","unstructured":"Yi Yang, Mark Christopher\u00a0Siy Uy, and Allen Huang. 2020. Finbert: A pretrained language model for financial communications. arXiv preprint arXiv:2006.08097 (2020)."},{"key":"e_1_3_2_1_32_1","volume-title":"Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414","author":"Zeng Aohan","year":"2022","unstructured":"Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, 2022. Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414 (2022)."},{"key":"e_1_3_2_1_33_1","volume-title":"Instruct-FinGPT: Financial Sentiment Analysis by Instruction Tuning of General-Purpose Large Language Models. arXiv preprint arXiv:2306.12659","author":"Zhang Boyu","year":"2023","unstructured":"Boyu Zhang, Hongyang Yang, and Xiao-Yang Liu. 2023. Instruct-FinGPT: Financial Sentiment Analysis by Instruction Tuning of General-Purpose Large Language Models. arXiv preprint arXiv:2306.12659 (2023)."},{"key":"e_1_3_2_1_34_1","unstructured":"Wayne\u00a0Xin Zhao Kun Zhou Junyi Li Tianyi Tang Xiaolei Wang Yupeng Hou Yingqian Min Beichen Zhang Junjie Zhang Zican Dong Yifan Du Chen Yang Yushuo Chen Zhipeng Chen Jinhao Jiang Ruiyang Ren Yifan Li Xinyu Tang Zikang Liu Peiyu Liu Jian-Yun Nie and Ji-Rong Wen. 2023. A Survey of Large Language Models. arxiv:2303.18223\u00a0[cs.CL]"}],"event":{"name":"ICAIF '23: 4th ACM International Conference on AI in Finance","location":"Brooklyn NY USA","acronym":"ICAIF '23"},"container-title":["4th ACM International Conference on AI in Finance"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3604237.3626866","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3604237.3626866","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T17:38:46Z","timestamp":1755884326000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3604237.3626866"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,25]]},"references-count":34,"alternative-id":["10.1145\/3604237.3626866","10.1145\/3604237"],"URL":"https:\/\/doi.org\/10.1145\/3604237.3626866","relation":{},"subject":[],"published":{"date-parts":[[2023,11,25]]},"assertion":[{"value":"2023-11-25","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}