{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T10:05:38Z","timestamp":1775815538655,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":62,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T00:00:00Z","timestamp":1745280000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,4,28]]},"DOI":"10.1145\/3696410.3714532","type":"proceedings-article","created":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T22:57:28Z","timestamp":1745362648000},"page":"5364-5375","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":6,"title":["From Predictions to Analyses: Rationale-Augmented Fake News Detection with Large Vision-Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-4128-682X","authenticated-orcid":false,"given":"Xiaofan","family":"Zheng","sequence":"first","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-7755-3621","authenticated-orcid":false,"given":"Zinan","family":"Zeng","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8686-4566","authenticated-orcid":false,"given":"Heng","family":"Wang","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6261-4583","authenticated-orcid":false,"given":"Yuyang","family":"Bai","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-9029-5812","authenticated-orcid":false,"given":"Yuhan","family":"Liu","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0140-7860","authenticated-orcid":false,"given":"Minnan","family":"Luo","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]}],"member":"320","published-online":{"date-parts":[[2025,4,22]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/3653325"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3132847.3132973"},{"key":"e_1_3_2_1_3_1","volume-title":"Cross-modal Ambiguity Learning for Multimodal Fake News Detection. In The ACM Web Conference. 2897--2905","author":"Yixuan","unstructured":"Yixuan Chen et al. 2022. Cross-modal Ambiguity Learning for Multimodal Fake News Detection. In The ACM Web Conference. 2897--2905."},{"key":"e_1_3_2_1_4_1","volume-title":"InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks. arXiv preprint arXiv:2312.14238","author":"Chen Zhe","year":"2023","unstructured":"Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. 2023. InternVL: Scaling up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks. arXiv preprint arXiv:2312.14238 (2023)."},{"key":"e_1_3_2_1_5_1","unstructured":"Tsun-Hin Cheung and Kin-Man Lam. 2023. FactLLaMA: Optimizing Instruction-Following Language Models with External Knowledge for Automated Fact-Checking. arxiv: 2309.00240 [cs.CL] https:\/\/arxiv.org\/abs\/2309.00240"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_7_1","volume-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. 
arXiv preprint arXiv:2010.11929","author":"Dosovitskiy Alexey","year":"2021","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. arXiv preprint arXiv:2010.11929 (2021). https:\/\/arxiv.org\/pdf\/2010.11929"},{"key":"e_1_3_2_1_8_1","unstructured":"Yilun Du Shuang Li Antonio Torralba Joshua B. Tenenbaum and Igor Mordatch. 2023. Improving Factuality and Reasoning in Language Models through Multiagent Debate. arxiv: 2305.14325 [cs.CL] https:\/\/arxiv.org\/abs\/2305.14325"},{"key":"e_1_3_2_1_9_1","volume-title":"Good Advisor: Exploring the Role of Large Language Models in Fake News Detection. Proceedings of the AAAI Conference on Artificial Intelligence","volume":"38","author":"Hu Beizhe","year":"2024","unstructured":"Beizhe Hu, Qiang Sheng, Juan Cao, Yuhui Shi, Yang Li, Danding Wang, and Peng Qi. 2024. Bad Actor, Good Advisor: Exploring the Role of Large Language Models in Fake News Detection. Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38, 20 (2024), 22105--22113."},{"key":"e_1_3_2_1_10_1","unstructured":"Beizhe Hu Qiang Sheng Juan Cao Yongchun Zhu Danding Wang Zhengjia Wang and Zhiwei Jin. 2023b. Learn over Past Evolve for Future: Forecasting Temporal Trends for Fake News Detection. 
arxiv: 2306.14728 [cs.CL] https:\/\/arxiv.org\/abs\/2306.14728"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.09.001"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.09.001"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.62"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591896"},{"key":"e_1_3_2_1_15_1","unstructured":"Kush Juvekar and Anupam Purwar. 2024. COS-Mix: Cosine Similarity and Distance Fusion for Improved Information Retrieval. arxiv: 2406.00638 [cs.IR] https:\/\/arxiv.org\/abs\/2406.00638"},{"key":"e_1_3_2_1_16_1","volume-title":"Proceedings of the 27th International Conference on Computational Linguistics, Emily M. Bender, Leon Derczynski, and Pierre Isabelle (Eds.). Association for Computational Linguistics","author":"Kochkina Elena","year":"2018","unstructured":"Elena Kochkina, Maria Liakata, and Arkaitz Zubiaga. 2018. All-in-one: Multi-task Learning for Rumour Verification. In Proceedings of the 27th International Conference on Computational Linguistics, Emily M. Bender, Leon Derczynski, and Pierre Isabelle (Eds.). Association for Computational Linguistics, Santa Fe, New Mexico, USA, 3402--3413. https:\/\/aclanthology.org\/C18--1288"},{"key":"e_1_3_2_1_17_1","unstructured":"Bo Li Kaichen Zhang Hao Zhang Dong Guo Renrui Zhang Feng Li Yuanhan Zhang Ziwei Liu and Chunyuan Li. 2024c. LLaVA-NeXT: Stronger LLMs Supercharge Multimodal Capabilities in the Wild. https:\/\/llava-vl.github.io\/blog\/2024-05--10-llava-next-stronger-llms\/"},{"key":"e_1_3_2_1_18_1","volume-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597","author":"Li Junnan","year":"2023","unstructured":"Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023. 
Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)."},{"key":"e_1_3_2_1_19_1","unstructured":"Zhaowei Li Wei Wang YiQing Cai Xu Qi Pengyu Wang Dong Zhang Hang Song Botian Jiang Zhida Huang and Tao Wang. 2024a. UnifiedMLLM: Enabling Unified Representation for Multi-modal Multi-tasks With Large Language Model. arxiv: 2408.02503 [cs.CL] https:\/\/arxiv.org\/abs\/2408.02503"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_21_1","volume-title":"Towards Explainable Harmful Meme Detection through Multimodal Debate between Large Language Models. arXiv preprint arXiv:2401.13298","author":"Lin Hongzhan","year":"2024","unstructured":"Hongzhan Lin, Ziyang Luo, Wei Gao, Jing Ma, Bo Wang, and Ruichao Yang. 2024. Towards Explainable Harmful Meme Detection through Multimodal Debate between Large Language Models. arXiv preprint arXiv:2401.13298 (2024)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"e_1_3_2_1_23_1","volume-title":"RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692 (2019). https:\/\/arxiv.org\/pdf\/1907.11692"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11268"},{"key":"e_1_3_2_1_25_1","unstructured":"Zhiwei Liu Kailai Yang Qianqian Xie Christine de Kock Sophia Ananiadou and Eduard Hovy. 2024. AEmoLLM: Retrieval Augmented LLMs for Cross-Domain Misinformation Detection Using In-Context Learning based on Emotional Information. 
arxiv: 2406.11093 [cs.CL] https:\/\/arxiv.org\/abs\/2406.11093"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.48"},{"key":"e_1_3_2_1_27_1","unstructured":"Xing Han L\u00f9. 2024. BM25S: Orders of magnitude faster lexical search via eager sparse scoring. arxiv: 2407.03618 [cs.IR] https:\/\/arxiv.org\/abs\/2407.03618"},{"key":"e_1_3_2_1_28_1","unstructured":"Yida Mu Xingyi Song Kalina Bontcheva and Nikolaos Aletras. 2024. Examining the Limitations of Computational Rumor Detection Models Trained on Static Datasets. arxiv: 2309.11576 [cs.CL] https:\/\/arxiv.org\/abs\/2309.11576"},{"key":"e_1_3_2_1_29_1","volume-title":"Let Silence Speak: Enhancing Fake News Detection with Generated Comments from Large Language Models. CoRR","author":"Nan Qiong","year":"2024","unstructured":"Qiong Nan, Qiang Sheng, Juan Cao, Beizhe Hu, Danding Wang, and Jintao Li. 2024. Let Silence Speak: Enhancing Fake News Detection with Generated Comments from Large Language Models. CoRR, Vol. abs\/2405.16631 (2024). http:\/\/dblp.uni-trier.de\/db\/journals\/corr\/corr2405.html#abs-2405--16631"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3481548"},{"key":"e_1_3_2_1_32_1","volume-title":"Hierarchical Multi-modal Contextual Attention Network for Fake News Detection. In The International ACM SIGIR Conference on Research and Development in Information Retrieval, Virtual Event. 153--162","author":"Qian Shengsheng","year":"2021","unstructured":"Shengsheng Qian, Jinguang Wang, Jun Hu, Quan Fang, and Changsheng Xu. 2021. Hierarchical Multi-modal Contextual Attention Network for Fake News Detection. In The International ACM SIGIR Conference on Research and Development in Information Retrieval, Virtual Event. 153--162."},{"key":"e_1_3_2_1_33_1","unstructured":"Chengwei Qin Aston Zhang Zhuosheng Zhang Jiaao Chen Michihiro Yasunaga and Diyi Yang. 2023. Is ChatGPT a General-Purpose Natural Language Processing Task Solver? 
arxiv: 2302.06476 [cs.CL] https:\/\/arxiv.org\/abs\/2302.06476"},{"key":"e_1_3_2_1_34_1","unstructured":"Yoel Roth. 2022. The vast majority of content we take action on for misinformation is identified proactively. https:\/\/twitter.com\/yoyoel\/status\/1483094057471524867. Accessed: 2023-08--13."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3137597.3137600"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3412841.3442043"},{"key":"e_1_3_2_1_37_1","unstructured":"Jinyan Su Claire Cardie and Preslav Nakov. 2024. Adapting Fake News Detection to the Era of Large Language Models. arxiv: 2311.04917 [cs.CL] https:\/\/arxiv.org\/abs\/2311.04917"},{"key":"e_1_3_2_1_38_1","unstructured":"Sahar Tahmasebi Eric M\u00fcller-Budack and Ralph Ewerth. 2024. Multimodal Misinformation Detection using Large Vision-Language Models. arxiv: 2407.14321 [cs.CL] https:\/\/arxiv.org\/abs\/2407.14321"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.3961\/jpmph.20.094"},{"key":"e_1_3_2_1_40_1","volume-title":"International Conference on Machine Learning","volume":"139","author":"Touvron Hugo","year":"2021","unstructured":"Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Herve Jegou. 2021. Training data-efficient image transformers distillation through attention. In International Conference on Machine Learning, Vol. 139. 10347--10357."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1126\/science.aap9559"},{"key":"e_1_3_2_1_42_1","volume-title":"DELL: Generating Reactions and Explanations for LLM-Based Misinformation Detection. arXiv preprint arXiv:2402.10426","author":"Wan Herun","year":"2024","unstructured":"Herun Wan, Shangbin Feng, Zhaoxuan Tan, Heng Wang, Yulia Tsvetkov, and Minnan Luo. 2024. DELL: Generating Reactions and Explanations for LLM-Based Misinformation Detection. 
arXiv preprint arXiv:2402.10426 (2024)."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645471"},{"key":"e_1_3_2_1_44_1","unstructured":"Jiaqi Wang Hanqi Jiang Yiheng Liu Chong Ma Xu Zhang Yi Pan Mengyuan Liu Peiran Gu Sichen Xia Wenjun Li Yutong Zhang Zihao Wu Zhengliang Liu Tianyang Zhong Bao Ge Tuo Zhang Ning Qiang Xintao Hu Xi Jiang Xin Zhang Wei Zhang Dinggang Shen Tianming Liu and Shu Zhang. 2024a. A Comprehensive Review of Multimodal Large Language Models: Performance and Challenges Across Different Tasks. arxiv: 2408.01319 [cs.AI] https:\/\/arxiv.org\/abs\/2408.01319"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3613850"},{"key":"e_1_3_2_1_46_1","unstructured":"Peiyi Wang Lei Li Liang Chen Zefan Cai Dawei Zhu Binghuai Lin Yunbo Cao Qi Liu Tianyu Liu and Zhifang Sui. 2023a. Large Language Models are not Fair Evaluators. arxiv: 2305.17926 [cs.CL] https:\/\/arxiv.org\/abs\/2305.17926"},{"key":"e_1_3_2_1_47_1","volume-title":"Aakanksha Chowdhery, and Denny Zhou.","author":"Wang Xuezhi","year":"2023","unstructured":"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023b. Self-Consistency Improves Chain of Thought Reasoning in Language Models. arxiv: 2203.11171 [cs.CL] https:\/\/arxiv.org\/abs\/2203.11171"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3372278.3390713"},{"key":"e_1_3_2_1_49_1","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. 2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. In Advances in Neural Information Processing Systems, Vol. 35. Curran Associates, Inc., 24824--24837. 
https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf"},{"key":"e_1_3_2_1_50_1","unstructured":"Yang Wu Pengwei Zhan Yunjian Zhang LiMing Wang and Zhen Xu. 2021. Multimodal Fusion with Co-Attention Networks for Fake News Detection. In Findings of the Association for Computational Linguistics. 2560--2569."},{"key":"e_1_3_2_1_51_1","unstructured":"Junhao Xu Longdi Xian Zening Liu Mingliang Chen Qiuyang Yin and Fenghua Song. 2024. The Future of Combating Rumors? Retrieval Discrimination and Generation. arxiv: 2403.20204 [cs.AI] https:\/\/arxiv.org\/abs\/2403.20204"},{"key":"e_1_3_2_1_52_1","volume-title":"The Earth is Flat because...: Investigating LLMs' Belief towards Misinformation via Persuasive Conversation. arXiv preprint arXiv:2312.09085","author":"Xu Rongwu","year":"2023","unstructured":"Rongwu Xu, Brian S Lin, Shujian Yang, Tianqi Zhang, Weiyan Shi, Tianwei Zhang, Zhixuan Fang, Wei Xu, and Han Qiu. 2023. The Earth is Flat because...: Investigating LLMs' Belief towards Misinformation via Persuasive Conversation. arXiv preprint arXiv:2312.09085 (2023)."},{"key":"e_1_3_2_1_53_1","volume-title":"LEMMA: Towards LVLM-Enhanced Multimodal Misinformation Detection with External Knowledge Augmentation. arXiv preprint arXiv:2402.11943","author":"Xuan Keyang","year":"2024","unstructured":"Keyang Xuan, Li Yi, Fan Yang, Ruochen Wu, Yi R Fung, and Heng Ji. 2024. LEMMA: Towards LVLM-Enhanced Multimodal Misinformation Detection with External Knowledge Augmentation. arXiv preprint arXiv:2402.11943 (2024)."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/INES49302.2020.9147195"},{"key":"e_1_3_2_1_55_1","unstructured":"Duzhen Zhang Yahan Yu Jiahua Dong Chenxing Li Dan Su Chenhui Chu and Dong Yu. 2024. MM-LLMs: Recent Advances in MultiModal Large Language Models. 
arxiv: 2401.13601 [cs.CL] https:\/\/arxiv.org\/abs\/2401.13601"},{"key":"e_1_3_2_1_56_1","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR'25)","author":"Zhang Hangtao","year":"2025","unstructured":"Hangtao Zhang, Chenyu Zhu, Xianlong Wang, Ziqi Zhou, Changgan Yin, Minghui Li, Lulu Xue, Yichen Wang, Shengshan Hu, Aishan Liu, et al. 2025. BadRobot: Manipulating embodied LLMs in the physical world. In Proceedings of the International Conference on Learning Representations (ICLR'25)."},{"key":"e_1_3_2_1_57_1","unstructured":"Xuan Zhang and Wei Gao. 2023. Towards LLM-based Fact Verification on News Claims with a Hierarchical Step-by-Step Prompting Method. arxiv: 2310.00305 [cs.CL] https:\/\/arxiv.org\/abs\/2310.00305"},{"key":"e_1_3_2_1_58_1","volume-title":"A Survey of Large Language Models. arXiv preprint arXiv:2303.18223","author":"Zhao Wayne Xin","year":"2023","unstructured":"Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, Yifan Du, Chen Yang, Yushuo Chen, Zhipeng Chen, Jinhao Jiang, Ruiyang Ren, Yifan Li, Xinyu Tang, Zikang Liu, Peiyu Liu, Jian-Yun Nie, and Ji-Rong Wen. 2023. A Survey of Large Language Models. arXiv preprint arXiv:2303.18223 (2023). https:\/\/arxiv.org\/pdf\/2303.18223"},{"key":"e_1_3_2_1_59_1","volume-title":"Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence. 2413--2419","author":"Jiaqi","unstructured":"Jiaqi Zheng et al. 2022. MFAN: Multi-modal Feature-enhanced Attention Networks for Rumor Detection. In Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence. 2413--2419."},{"key":"e_1_3_2_1_60_1","unstructured":"Lianmin Zheng Wei-Lin Chiang Ying Sheng Siyuan Zhuang Zhanghao Wu Yonghao Zhuang Zi Lin Zhuohan Li Dacheng Li Eric P. Xing Hao Zhang Joseph E. Gonzalez and Ion Stoica. 2023. Judging LLM-as-a-Judge with MT-Bench and Chatbot Arena. 
arxiv: 2306.05685 [cs.CL] https:\/\/arxiv.org\/abs\/2306.05685"},{"key":"e_1_3_2_1_61_1","volume-title":"Proceedings of the 31st International Conference on Computational Linguistics, Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa","author":"Zheng Xiaofan","year":"2025","unstructured":"Xiaofan Zheng, Minnan Luo, and Xinghao Wang. 2025. Unveiling Fake News with Adversarial Arguments Generated by Multimodal Large Language Models. In Proceedings of the 31st International Conference on Computational Linguistics, Owen Rambow, Leo Wanner, Marianna Apidianaki, Hend Al-Khalifa, Barbara Di Eugenio, and Steven Schockaert (Eds.). Association for Computational Linguistics, Abu Dhabi, UAE, 7862--7869. https:\/\/aclanthology.org\/2025.coling-main.526\/"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.3390\/electronics10050593"},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531816"}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM on Web Conference 
2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714532","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3696410.3714532","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:33Z","timestamp":1750295913000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714532"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,22]]},"references-count":62,"alternative-id":["10.1145\/3696410.3714532","10.1145\/3696410"],"URL":"https:\/\/doi.org\/10.1145\/3696410.3714532","relation":{},"subject":[],"published":{"date-parts":[[2025,4,22]]},"assertion":[{"value":"2025-04-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}