{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,11]],"date-time":"2025-12-11T21:07:13Z","timestamp":1765487233073,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":23,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T00:00:00Z","timestamp":1746662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"The University of Manchester"},{"DOI":"10.13039\/501100006374","name":"New Energy and Industrial Technology Development Organization","doi-asserted-by":"publisher","award":["JPNP20006"],"award-info":[{"award-number":["JPNP20006"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Manchester-Melbourne-Toronto Research Funding"},{"name":"The National Recovery and Resilience Plan Greece 2.0 funded by the European Union under the NextGeneration EU Program","award":["MIS 5154714"],"award-info":[{"award-number":["MIS 5154714"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,5,8]]},"DOI":"10.1145\/3701716.3715599","type":"proceedings-article","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T16:20:01Z","timestamp":1748017201000},"page":"1153-1157","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["FMDLlama: Financial Misinformation Detection Based on Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7015-5054","authenticated-orcid":false,"given":"Zhiwei","family":"Liu","sequence":"first","affiliation":[{"name":"The University of Manchester, Manchester, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0237-9539","authenticated-orcid":false,"given":"Xin","family":"Zhang","sequence":"additional","affiliation":[{"name":"The University of Manchester, Manchester, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3142-2516","authenticated-orcid":false,"given":"Kailai","family":"Yang","sequence":"additional","affiliation":[{"name":"The University of Manchester, Manchester, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9588-7454","authenticated-orcid":false,"given":"Qianqian","family":"Xie","sequence":"additional","affiliation":[{"name":"The Fin AI, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3501-3907","authenticated-orcid":false,"given":"Jimin","family":"Huang","sequence":"additional","affiliation":[{"name":"The Fin AI, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4097-9191","authenticated-orcid":false,"given":"Sophia","family":"Ananiadou","sequence":"additional","affiliation":[{"name":"The University of Manchester, Manchester, United Kingdom and Archimedes RC, Athena, Greece"}]}],"member":"320","published-online":{"date-parts":[[2025,5,23]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10796-022-10327-9"},{"key":"e_1_3_2_1_3_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_4_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_1_5_1","volume-title":"Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al.","author":"Jiang Albert Q","year":"2023","unstructured":"Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-45170-6_67"},{"key":"e_1_3_2_1_7_1","unstructured":"Shimon Kogan Tobias J Moskowitz and Marina Niessner. 2020. Fake news in financial markets. SSRN."},{"key":"e_1_3_2_1_8_1","volume-title":"Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74--81.","author":"Lin Chin-Yew","year":"2004","unstructured":"Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74--81."},{"key":"e_1_3_2_1_9_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_1_10_1","volume-title":"ConspEmoLLM: Conspiracy Theory Detection Using an Emotion-Based Large Language Model. arXiv preprint arXiv:2403.06765","author":"Liu Zhiwei","year":"2024","unstructured":"Zhiwei Liu, Boyang Liu, Paul Thompson, Kailai Yang, Raghav Jain, and Sophia Ananiadou. 2024a. ConspEmoLLM: Conspiracy Theory Detection Using an Emotion-Based Large Language Model. arXiv preprint arXiv:2403.06765 (2024)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671552"},{"key":"e_1_3_2_1_12_1","volume-title":"Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/COMSNETS56262.2023.10041329"},{"key":"e_1_3_2_1_14_1","unstructured":"Aman Rangapur Haoran Wang and Kai Shu. 2023a. Fin-Fact: A Benchmark Dataset for Multimodal Financial Fact Checking and Explanation Generation. arxiv: 2309.08793 [cs.AI]"},{"key":"e_1_3_2_1_15_1","volume-title":"Investigating online financial misinformation and its consequences: A computational perspective. arXiv preprint arXiv:2309.12363","author":"Rangapur Aman","year":"2023","unstructured":"Aman Rangapur, Haoran Wang, and Kai Shu. 2023b. Investigating online financial misinformation and its consequences: A computational perspective. arXiv preprint arXiv:2309.12363 (2023)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"e_1_3_2_1_17_1","volume-title":"When flue meets flang: Benchmarks and large pre-trained language model for financial domain. arXiv preprint arXiv:2211.00083","author":"Shah Raj Sanjay","year":"2022","unstructured":"Raj Sanjay Shah, Kunal Chawla, Dheeraj Eidnani, Agam Shah, Wendi Du, Sudheer Chava, Natraj Raman, Charese Smiley, Jiaao Chen, and Diyi Yang. 2022. When flue meets flang: Benchmarks and large pre-trained language model for financial domain. arXiv preprint arXiv:2211.00083 (2022)."},{"key":"e_1_3_2_1_18_1","volume-title":"Juliette Love, et al.","author":"Team Gemma","year":"2024","unstructured":"Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Rivi\u00e8re, Mihir Sanjay Kale, Juliette Love, et al. 2024. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024)."},{"key":"e_1_3_2_1_19_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_20_1","volume-title":"PIXIU: A Large Language Model, Instruction Data and Evaluation Benchmark for Finance. arXiv preprint arXiv:2306.05443","author":"Xie Qianqian","year":"2023","unstructured":"Qianqian Xie, Weiguang Han, Xiao Zhang, Yanzhao Lai, Min Peng, Alejandro Lopez-Lira, and Jimin Huang. 2023. PIXIU: A Large Language Model, Instruction Data and Evaluation Benchmark for Finance. arXiv preprint arXiv:2306.05443 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Mentalllama: Interpretable mental health analysis on social media with large language models. arXiv preprint arXiv:2309.13567","author":"Yang Kailai","year":"2023","unstructured":"Kailai Yang, Tianlin Zhang, Ziyan Kuang, Qianqian Xie, and Sophia Ananiadou. 2023. Mentalllama: Interpretable mental health analysis on social media with large language models. arXiv preprint arXiv:2309.13567 (2023)."},{"key":"e_1_3_2_1_22_1","volume-title":"Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675","author":"Zhang Tianyi","year":"2019","unstructured":"Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675 (2019)."},{"key":"e_1_3_2_1_23_1","volume-title":"Building emotional support chatbots in the era of llms. arXiv preprint arXiv:2308.11584","author":"Zheng Zhonghua","year":"2023","unstructured":"Zhonghua Zheng, Lizi Liao, Yang Deng, and Liqiang Nie. 2023. Building emotional support chatbots in the era of llms. arXiv preprint arXiv:2308.11584 (2023)."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Sydney NSW Australia","acronym":"WWW '25"},"container-title":["Companion Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3715599","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3701716.3715599","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T03:09:32Z","timestamp":1759892972000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3715599"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":23,"alternative-id":["10.1145\/3701716.3715599","10.1145\/3701716"],"URL":"https:\/\/doi.org\/10.1145\/3701716.3715599","relation":{},"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"2025-05-23","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}