{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T02:11:53Z","timestamp":1765505513482,"version":"3.48.0"},"publisher-location":"New York, NY, USA","reference-count":43,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,10]]},"DOI":"10.1145\/3746252.3760970","type":"proceedings-article","created":{"date-parts":[[2025,11,8]],"date-time":"2025-11-08T00:36:36Z","timestamp":1762562196000},"page":"5396-5400","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Forecasting the Buzz: Enriching Hashtag Popularity Prediction with LLM Reasoning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-7171-0121","authenticated-orcid":false,"given":"Yifei","family":"Xu","sequence":"first","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3843-7115","authenticated-orcid":false,"given":"Jiaying","family":"Wu","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3294-3383","authenticated-orcid":false,"given":"Herun","family":"Wan","sequence":"additional","affiliation":[{"name":"Xi'an Jiaotong University, Xi'an, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-8480-9067","authenticated-orcid":false,"given":"Yang","family":"Li","sequence":"additional","affiliation":[{"name":"Institute of Medical Information, Chinese Academy of Medical Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1465-3518","authenticated-orcid":false,"given":"Zhen","family":"Hou","sequence":"additional","affiliation":[{"name":"Institute of Medical Information, Chinese Academy of Medical Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8507-3716","authenticated-orcid":false,"given":"Min-Yen","family":"Kan","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2025,11,10]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Advances in Neural Information Processing Systems","volume":"21","author":"Agarwal Deepak","year":"2008","unstructured":"Deepak Agarwal, Bee-Chung Chen, Pradheep Elango, Nitin Motgi, Seung-Taek Park, Raghu Ramakrishnan, Scott Roy, and Joe Zachariah. 2008. Online models for content optimization. Advances in Neural Information Processing Systems, Vol. 21 (2008)."},{"key":"e_1_3_2_1_2_1","volume-title":"Twitter hashtags from ad hoc to calculated publics. Hashtag publics: The power and politics of discursive networks","author":"Bruns Axel","year":"2015","unstructured":"Axel Bruns and Jean Burgess. 2015. Twitter hashtags from ad hoc to calculated publics. Hashtag publics: The power and politics of discursive networks (2015), 13-28."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/s13278-023-01024-9"},{"key":"e_1_3_2_1_4_1","volume-title":"ClinicalBench: Can LLMs Beat Traditional ML Models in Clinical Prediction? arXiv preprint arXiv:2411.06469","author":"Chen Canyu","year":"2024","unstructured":"Canyu Chen, Jian Yu, Shan Chen, Che Liu, Zhongwei Wan, Danielle Bitterman, Fei Wang, and Kai Shu. 2024. ClinicalBench: Can LLMs Beat Traditional ML Models in Clinical Prediction? arXiv preprint arXiv:2411.06469 (2024)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.747"},{"key":"e_1_3_2_1_6_1","first-page":"4171","volume-title":"Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers). 4171-4186."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3356062"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1108\/YC-08-2022-1588"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.196"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"crossref","unstructured":"Chen Gao Xiaochong Lan Zhihong Lu Jinzhu Mao Jinghua Piao Huandong Wang Depeng Jin and Yong Li. 2023. S3: Social-network Simulation System with Large Language Model-Empowered Agents. arXiv:2307.14984 [cs.SI]","DOI":"10.2139\/ssrn.4607026"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.jjimei.2022.100116"},{"key":"e_1_3_2_1_12_1","volume-title":"The Twelfth International Conference on Learning Representations.","author":"He Xiaoxin","year":"2024","unstructured":"Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. 2024. Harnessing explanations: Llm-to-lm interpreter for enhanced text-attributed graph representation learning. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3689000"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i20.30214"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3703155"},{"key":"e_1_3_2_1_16_1","unstructured":"Aaron Hurst Adam Lerer Adam P Goucher Adam Perelman Aditya Ramesh Aidan Clark AJ Ostrow Akila Welihinda Alan Hayes Alec Radford et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276 (2024)."},{"key":"e_1_3_2_1_17_1","volume-title":"Lightgbm: A highly efficient gradient boosting decision tree. Advances in neural information processing systems","author":"Ke Guolin","year":"2017","unstructured":"Guolin Ke, Qi Meng, Thomas Finley, Taifeng Wang, Wei Chen, Weidong Ma, Qiwei Ye, and Tie-Yan Liu. 2017. Lightgbm: A highly efficient gradient boosting decision tree. Advances in neural information processing systems, Vol. 30 (2017)."},{"key":"e_1_3_2_1_18_1","volume-title":"Maram Hasanain, Sahinur Rahman Laskar, Naeemul Hassan, and Firoj Alam.","author":"Kmainasi Mohamed Bayan","year":"2025","unstructured":"Mohamed Bayan Kmainasi, Ali Ezzat Shahroor, Maram Hasanain, Sahinur Rahman Laskar, Naeemul Hassan, and Firoj Alam. 2025. LlamaLens: Specialized Multilingual LLM for Analyzing News and Social Media Content. In Findings of the Association for Computational Linguistics: NAACL 2025. 5627-5649."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.3389\/fsoc.2022.1081603"},{"key":"e_1_3_2_1_20_1","volume-title":"Mobilizing social media users to become advertisers: Corporate hashtag campaigns as a public health concern. Digital health","author":"Laestadius Linnea I","year":"2017","unstructured":"Linnea I Laestadius and Megan M Wahl. 2017. Mobilizing social media users to become advertisers: Corporate hashtag campaigns as a public health concern. Digital health, Vol. 3 (2017)."},{"key":"e_1_3_2_1_21_1","volume-title":"Matthew Wiener, et al","author":"Liaw Andy","year":"2002","unstructured":"Andy Liaw, Matthew Wiener, et al., 2002. Classification and regression by randomForest. R news, Vol. 2, 3 (2002), 18-22."},{"key":"e_1_3_2_1_22_1","unstructured":"Yinhan Liu Myle Ott Naman Goyal Jingfei Du Mandar Joshi Danqi Chen Omer Levy Mike Lewis Luke Zettlemoyer and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv:1907.11692 [cs.CL]"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/2348283.2348525"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3627673.3679519"},{"key":"e_1_3_2_1_25_1","unstructured":"OpenAI. 2025. OpenAI o3-mini."},{"key":"e_1_3_2_1_26_1","volume-title":"Large language models can infer psychological dispositions of social media users. PNAS nexus","author":"Peters Heinrich","year":"2024","unstructured":"Heinrich Peters and Sandra C Matz. 2024. Large language models can infer psychological dispositions of social media users. PNAS nexus, Vol. 3, 6 (2024), pgae231."},{"key":"e_1_3_2_1_27_1","volume-title":"Proceedings of the 32nd International Conference on Neural Information Processing Systems. 6639-6649","author":"Prokhorenkova Liudmila","year":"2018","unstructured":"Liudmila Prokhorenkova, Gleb Gusev, Aleksandr Vorobev, Anna Veronika Dorogush, and Andrey Gulin. 2018. CatBoost: unbiased boosting with categorical features. In Proceedings of the 32nd International Conference on Neural Information Processing Systems. 6639-6649."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.intmar.2020.05.001"},{"key":"e_1_3_2_1_29_1","volume-title":"Understanding LLM Embeddings for Regression. arXiv preprint arXiv:2411.14708","author":"Tang Eric","year":"2024","unstructured":"Eric Tang, Bangding Yang, and Xingyou Song. 2024. Understanding LLM Embeddings for Regression. arXiv preprint arXiv:2411.14708 (2024)."},{"key":"e_1_3_2_1_30_1","volume-title":"Simulating social media using large language models to evaluate alternative news feed algorithms. arXiv preprint arXiv:2310.05984","author":"T\u00f6rnberg Petter","year":"2023","unstructured":"Petter T\u00f6rnberg, Diliara Valeeva, Justus Uitermark, and Christopher Bail. 2023. Simulating social media using large language models to evaluate alternative news feed algorithms. arXiv preprint arXiv:2310.05984 (2023)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1007\/s43039-021-00035-8"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3688999"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbusres.2024.114746"},{"key":"e_1_3_2_1_34_1","unstructured":"Shenzhi Wang Yaowei Zheng Guoyin Wang Shiji Song and Gao Huang. 2024. Llama3-8B-Chinese-Chat (Revision 6622a23). https:\/\/huggingface.co\/shenzhi-wang\/Llama3-8B-Chinese-Chat"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3613853"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3551576"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"e_1_3_2_1_38_1","volume-title":"An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations.","author":"Xiong Miao","year":"2024","unstructured":"Miao Xiong, Zhiyuan Hu, Xinyang Lu, YIFEI LI, Jie Fu, Junxian He, and Bryan Hooi. 2024. Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_39_1","volume-title":"SMTPD: A New Benchmark for Temporal Prediction of Social Media Popularity. arXiv:2503.04446 [cs.SI]","author":"Xu Yijie","year":"2025","unstructured":"Yijie Xu, Bolun Zheng, Wei Zhu, Hangjia Pan, Yuchen Yao, Ning Xu, Anan Liu, Quan Zhang, and Chenggang Yan. 2025. SMTPD: A New Benchmark for Temporal Prediction of Social Media Popularity. arXiv:2503.04446 [cs.SI]"},{"key":"e_1_3_2_1_40_1","volume-title":"The Thirteenth International Conference on Learning Representations.","author":"Yang Haotong","year":"2025","unstructured":"Haotong Yang, Yi Hu, Shijia Kang, Zhouchen Lin, and Muhan Zhang. 2025. Number Cookbook: Number Understanding of Language Models and How to Improve It. In The Thirteenth International Conference on Learning Representations."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1145\/1935826.1935863"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3648137"},{"key":"e_1_3_2_1_43_1","volume-title":"Large language models for social networks: Applications, challenges, and solutions. arXiv preprint arXiv:2401.02575","author":"Zeng Jingying","year":"2024","unstructured":"Jingying Zeng, Richard Huang, Waleed Malik, Langxuan Yin, Bojan Babic, Danny Shacham, Xiao Yan, Jaewon Yang, and Qi He. 2024. Large language models for social networks: Applications, challenges, and solutions. arXiv preprint arXiv:2401.02575 (2024)."}],"event":{"name":"CIKM '25: The 34th ACM International Conference on Information and Knowledge Management","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Seoul Republic of Korea","acronym":"CIKM '25"},"container-title":["Proceedings of the 34th ACM International Conference on Information and Knowledge Management"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746252.3760970","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T02:08:11Z","timestamp":1765505291000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746252.3760970"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,10]]},"references-count":43,"alternative-id":["10.1145\/3746252.3760970","10.1145\/3746252"],"URL":"https:\/\/doi.org\/10.1145\/3746252.3760970","relation":{},"subject":[],"published":{"date-parts":[[2025,11,10]]},"assertion":[{"value":"2025-11-10","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}