{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T16:50:46Z","timestamp":1755795046658,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":58,"publisher":"ACM","funder":[{"DOI":"10.13039\/100000001","name":"NSF (National Science Foundation)","doi-asserted-by":"publisher","award":["IIS-1942680, CNS-1952085, DGE-2021871"],"award-info":[{"award-number":["IIS-1942680, CNS-1952085, DGE-2021871"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010428","name":"Innovation and Technology Fund","doi-asserted-by":"publisher","award":["ITP\/012\/25LP"],"award-info":[{"award-number":["ITP\/012\/25LP"]}],"id":[{"id":"10.13039\/501100010428","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3737177","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T21:07:39Z","timestamp":1754255259000},"page":"1951-1962","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["UrbanMind: Urban Dynamics Prediction with Multifaceted Spatial-Temporal Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7202-0700","authenticated-orcid":false,"given":"Yuhang","family":"Liu","sequence":"first","affiliation":[{"name":"State University of New York at Binghamton, Binghamton, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0947-1875","authenticated-orcid":false,"given":"Yingxue","family":"Zhang","sequence":"additional","affiliation":[{"name":"State University of New York at Binghamton, Binghamton, NY, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0289-1452","authenticated-orcid":false,"given":"Xin","family":"Zhang","sequence":"additional","affiliation":[{"name":"San Diego State University, San Diego, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8475-9203","authenticated-orcid":false,"given":"Ling","family":"Tian","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8972-503X","authenticated-orcid":false,"given":"Yanhua","family":"Li","sequence":"additional","affiliation":[{"name":"Worcester Polytechnic Institute, Worcester, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2032-0381","authenticated-orcid":false,"given":"Jun","family":"Luo","sequence":"additional","affiliation":[{"name":"Logistics and Supply Chain MultiTech R&amp;D Centre, Hong Kong, China"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"Jean-Baptiste Alayrac Jeff Donahue Pauline Luc Antoine Miech Iain Barr Yana Hasson Karel Lenc Arthur Mensch Katherine Millican Malcolm Reynolds et al. 2022. Flamingo: a Visual Language Model for Few-Shot Learning. Advances in neural information processing systems 35 (2022) 23716-23736."},{"key":"e_1_3_2_2_2_1","first-page":"1877","article-title":"Language Models are Few-Shot Learners","volume":"33","author":"Brown Tom B.","year":"2020","unstructured":"Tom B. Brown, Benjamin Mann, Nick Ryder, et al . 2020. Language Models are Few-Shot Learners. Advances in Neural Information Processing Systems 33 (2020), 1877-1901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_3_1","volume-title":"GATGPT: A Pre-trained Large Language Model with Graph Attention Network for Spatiotemporal Impu- tation. 
arXiv:2311.14332 [cs.LG] https:\/\/arxiv.org\/abs\/2311.14332","author":"Chen Yakun","year":"2023","unstructured":"Yakun Chen, Xianzhi Wang, and Guandong Xu. 2023. GATGPT: A Pre-trained Large Language Model with Graph Attention Network for Spatiotemporal Impu- tation. arXiv:2311.14332 [cs.LG] https:\/\/arxiv.org\/abs\/2311.14332"},{"key":"e_1_3_2_2_4_1","volume-title":"Gatgpt: A pre-trained large language model with graph attention network for spatiotemporal imputation. arXiv preprint arXiv:2311.14332","author":"Chen Yakun","year":"2023","unstructured":"Yakun Chen, Xianzhi Wang, and Guandong Xu. 2023. Gatgpt: A pre-trained large language model with graph attention network for spatiotemporal imputation. arXiv preprint arXiv:2311.14332 (2023)."},{"key":"e_1_3_2_2_5_1","volume-title":"Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90%* ChatGPT Quality. See https:\/\/vicuna.lmsys.org (accessed","author":"Chiang Wei-Lin","year":"2023","unstructured":"Wei-Lin Chiang, Zhuohan Li, Ziqing Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023. Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90%* ChatGPT Quality. See https:\/\/vicuna.lmsys.org (accessed 14 April 2023) 2, 3 (2023), 6."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2019.2950416"},{"key":"e_1_3_2_2_7_1","volume-title":"GLM: General Language Model Pretraining with Autoregressive Blank Infilling. arXiv preprint arXiv:2203.02155","author":"Du Zhengxiao","year":"2022","unstructured":"Zhengxiao Du, Yujie Qian, Xiao Liu, et al. 2022. GLM: General Language Model Pretraining with Autoregressive Blank Infilling. arXiv preprint arXiv:2203.02155 (2022)."},{"key":"e_1_3_2_2_8_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et al. 2024. The llama 3 herd of models. 
arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_2_9_1","unstructured":"Christoph Feichtenhofer Haoqi Fan Yanghao Li and Kaiming He. 2022. Masked Autoencoders As Spatiotemporal Learners. arXiv:2205.09113 [cs.CV] https: \/\/arxiv.org\/abs\/2205.09113"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3678717.3691235"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/326"},{"key":"e_1_3_2_2_12_1","volume-title":"Binbin Yong, and Qiang Wu.","author":"Huang Songtao","year":"2024","unstructured":"Songtao Huang, Hongjin Song, Tianqi Jiang, Akbar Telikani, Jun Shen, Qing- guo Zhou, Binbin Yong, and Qiang Wu. 2024. DST-GTN: Dynamic Spatio-Temporal Graph Transformer Network for Traffic Forecasting. arXiv preprint arXiv:2404.11996 (2024)."},{"key":"e_1_3_2_2_13_1","volume-title":"A Lightweight and Accurate Spatial-Temporal Transformer for Traffic Forecasting. arXiv preprint arXiv:2201.00008","author":"Li Guanyao","year":"2022","unstructured":"Guanyao Li, Shuhan Zhong, S.-H. Gary Chan, Ruiyuan Li, Chih-Chieh Hung, and Wen-Chih Peng. 2022. A Lightweight and Accurate Spatial-Temporal Transformer for Traffic Forecasting. arXiv preprint arXiv:2201.00008 (2022)."},{"key":"e_1_3_2_2_14_1","volume-title":"Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22)","author":"Li Ke","year":"2022","unstructured":"Ke Li, Yanjie Zhu, Chao Zhang, and Zhenhui Li. 2022. Selective Cross-City Transfer Learning for Traffic Prediction via Source Domain Mixing. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '22). 2014-2024."},{"key":"e_1_3_2_2_15_1","volume-title":"STS-CCL: Spatial- Temporal Synchronous Contextual Contrastive Learning for Urban Traffic Fore- casting. arXiv preprint arXiv:2307.02507","author":"Li Lincan","year":"2023","unstructured":"Lincan Li, Kaixiang Yang, Fengji Luo, and Jichao Bi. 2023. 
STS-CCL: Spatial- Temporal Synchronous Contextual Contrastive Learning for Urban Traffic Fore- casting. arXiv preprint arXiv:2307.02507 (2023)."},{"key":"e_1_3_2_2_16_1","unstructured":"Y. Li et al. 2024. TST-Trans: A Transformer Network for Urban Traffic Flow Prediction. IEEE Transactions on Intelligent Transportation Systems (2024)."},{"key":"e_1_3_2_2_17_1","volume-title":"Diffusion convolu- tional recurrent neural network: Data-driven traffic forecasting. arXiv preprint arXiv:1707.01926","author":"Li Yaguang","year":"2017","unstructured":"Yaguang Li, Rose Yu, Cyrus Shahabi, and Yan Liu. 2017. Diffusion convolu- tional recurrent neural network: Data-driven traffic forecasting. arXiv preprint arXiv:1707.01926 (2017)."},{"key":"e_1_3_2_2_18_1","volume-title":"Urban Region Embedding via Multi-View Contrastive Prediction. arXiv preprint arXiv:2312.09681","author":"Li Zechen","year":"2023","unstructured":"Zechen Li, Weiming Huang, Kai Zhao, Min Yang, Yongshun Gong, and Meng Chen. 2023. Urban Region Embedding via Multi-View Contrastive Prediction. arXiv preprint arXiv:2312.09681 (2023)."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671578"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671578"},{"key":"e_1_3_2_2_21_1","volume-title":"GPT-ST: Gener- ative Pre-Training of Spatio-Temporal Graph Neural Networks. arXiv preprint arXiv:2311.04245","author":"Li Zhonghang","year":"2023","unstructured":"Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2023. GPT-ST: Gener- ative Pre-Training of Spatio-Temporal Graph Neural Networks. arXiv preprint arXiv:2311.04245 (2023)."},{"key":"e_1_3_2_2_22_1","volume-title":"Rethinking Spatio-Temporal Transformer for Traffic Prediction: Multi-level Multi-view Augmented Learning Framework.arXiv preprint arXiv:2406.11921","author":"Lin Jiaqi","year":"2024","unstructured":"Jiaqi Lin and Qianqian Ren. 2024. 
Rethinking Spatio-Temporal Transformer for Traffic Prediction: Multi-level Multi-view Augmented Learning Framework.arXiv preprint arXiv:2406.11921 (2024)."},{"key":"e_1_3_2_2_23_1","unstructured":"Chenxi Liu Sun Yang Qianxiong Xu Zhishuai Li Cheng Long Ziyue Li and Rui Zhao. 2024. Spatial-Temporal Large Language Model for Traffic Prediction. arXiv:2401.10134 [cs.LG] https:\/\/arxiv.org\/abs\/2401.10134"},{"key":"e_1_3_2_2_24_1","volume-title":"Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134","author":"Liu Chenxi","year":"2024","unstructured":"Chenxi Liu, Sun Yang, Qianxiong Xu, Zhishuai Li, Cheng Long, Ziyue Li, and Rui Zhao. 2024. Spatial-temporal large language model for traffic prediction. arXiv preprint arXiv:2401.10134 (2024)."},{"key":"e_1_3_2_2_25_1","unstructured":"Lei Liu Shuo Yu Runze Wang Zhenxun Ma and Yanming Shen. 2024. How Can Large Language Models Understand Spatial-Temporal Data? arXiv:2401.14192 [cs.LG] https:\/\/arxiv.org\/abs\/2401.14192"},{"key":"e_1_3_2_2_26_1","volume-title":"How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192","author":"Liu Lei","year":"2024","unstructured":"Lei Liu, Shuo Yu, Runze Wang, Zhenxun Ma, and Yanming Shen. 2024. How can large language models understand spatial-temporal data? arXiv preprint arXiv:2401.14192 (2024)."},{"key":"e_1_3_2_2_27_1","volume-title":"Align Along Time and Space: A Graph Latent Diffusion Model for Traffic Dynamics Prediction. In 2024 IEEE International Conference on Data Mining (ICDM). IEEE, 271-280","author":"Liu Yuhang","year":"2024","unstructured":"Yuhang Liu, Yingxue Zhang, Xin Zhang, Yu Yang, Yiqun Xie, Sahar Ghanipoor Machiani, Yanhua Li, and Jun Luo. 2024. Align Along Time and Space: A Graph Latent Diffusion Model for Traffic Dynamics Prediction. In 2024 IEEE International Conference on Data Mining (ICDM). 
IEEE, 271-280."},{"key":"e_1_3_2_2_28_1","volume-title":"ChatGPT: Optimizing Language Models for Dialogue. OpenAI White Paper","author":"AI.","year":"2023","unstructured":"OpenAI. 2023. ChatGPT: Optimizing Language Models for Dialogue. OpenAI White Paper (2023). https:\/\/openai.com\/chatgpt"},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1007\/s40747-024-01754-z"},{"key":"e_1_3_2_2_30_1","volume-title":"AI Governance and Accountability: An Analysis of Anthropic's Claude. arXiv preprint arXiv:2407.01557","author":"Priyanshu Aman","year":"2024","unstructured":"Aman Priyanshu, Yash Maurya, and Zuofei Hong. 2024. AI Governance and Accountability: An Analysis of Anthropic's Claude. arXiv preprint arXiv:2407.01557 (2024)."},{"key":"e_1_3_2_2_31_1","volume-title":"Large Language Models for Graph-Based Reasoning. arXiv preprint arXiv:2305.02340","author":"Qian Guanchu","year":"2023","unstructured":"Guanchu Qian, Pan Zhang, and Jundong Li. 2023. Large Language Models for Graph-Based Reasoning. arXiv preprint arXiv:2305.02340 (2023)."},{"key":"e_1_3_2_2_32_1","volume-title":"TPLLM: A Traffic Prediction Framework Based on Pretrained Large Lan- guage Models. arXiv:2403.02221 [cs.LG] https:\/\/arxiv.org\/abs\/2403.02221","author":"Ren Yilong","year":"2024","unstructured":"Yilong Ren, Yue Chen, Shuai Liu, Boyue Wang, Haiyang Yu, and Zhiyong Cui. 2024. TPLLM: A Traffic Prediction Framework Based on Pretrained Large Lan- guage Models. arXiv:2403.02221 [cs.LG] https:\/\/arxiv.org\/abs\/2403.02221"},{"key":"e_1_3_2_2_33_1","volume-title":"TPLLM: A traffic prediction framework based on pretrained large language models. arXiv preprint arXiv:2403.02221","author":"Ren Yilong","year":"2024","unstructured":"Yilong Ren, Yue Chen, Shuai Liu, Boyue Wang, Haiyang Yu, and Zhiyong Cui. 2024. TPLLM: A traffic prediction framework based on pretrained large language models. 
arXiv preprint arXiv:2403.02221 (2024)."},{"key":"e_1_3_2_2_34_1","volume-title":"DYffusion: A Dynamics-informed Diffusion Model for Spatiotemporal Forecasting. Advances in neural information processing systems 36","author":"Cachay Salva R\u00fchling","year":"2023","unstructured":"Salva R\u00fchling Cachay, Bo Zhao, Hailey Joren, and Rose Yu. 2023. DYffusion: A Dynamics-informed Diffusion Model for Spatiotemporal Forecasting. Advances in neural information processing systems 36 (2023), 45259-45287."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-04167-0_33"},{"key":"e_1_3_2_2_36_1","volume-title":"CCDSReFormer: Traffic Flow Prediction with a Criss-Crossed Dual- Stream Enhanced Rectified Transformer Model. arXiv preprint arXiv:2403.17753","author":"Shao Zhiqi","year":"2024","unstructured":"Zhiqi Shao, Michael G. H. Bell, Ze Wang, D. Glenn Geers, Xusheng Yao, and Junbin Gao. 2024. CCDSReFormer: Traffic Flow Prediction with a Criss-Crossed Dual- Stream Enhanced Rectified Transformer Model. arXiv preprint arXiv:2403.17753 (2024)."},{"key":"e_1_3_2_2_37_1","volume-title":"Learning to (learn at test time). arXiv preprint arXiv:2310.13807","author":"Sun Yu","year":"2023","unstructured":"Yu Sun, Xinhao Li, Karan Dalal, Chloe Hsu, Sanmi Koyejo, Carlos Guestrin, Xiaolong Wang, Tatsunori Hashimoto, and Xinlei Chen. 2023. Learning to (learn at test time). arXiv preprint arXiv:2310.13807 (2023)."},{"key":"e_1_3_2_2_38_1","unstructured":"Yu Sun Xinhao Li Karan Dalal Jiarui Xu Arjun Vikram Genghan Zhang Yann Dubois Xinlei Chen Xiaolong Wang Sanmi Koyejo et al. 2024. Learning to (learn at test time): Rnns with expressive hidden states. arXiv preprint arXiv:2407.04620 (2024)."},{"key":"e_1_3_2_2_39_1","first-page":"60162","article-title":"Are language models actually useful for time series forecasting","volume":"37","author":"Tan Mingtian","year":"2024","unstructured":"Mingtian Tan, Mike Merrill, Vinayak Gupta, Tim Althoff, and Tom Hartvigsen. 
2024. Are language models actually useful for time series forecasting? Advances in Neural Information Processing Systems 37 (2024), 60162-60191.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_40_1","unstructured":"Hugo Touvron Thibaut Lavril Gautier Izacard et al. 2023. LLaMA: Open and Efficient Foundation Language Models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_41_1","volume-title":"UP-Diff: Latent Diffusion Model for Remote Sensing Urban Prediction. arXiv preprint arXiv:2407.11578","author":"Wang Zeyu","year":"2024","unstructured":"Zeyu Wang, Zecheng Hao, Jingyu Lin, Yuchao Feng, and Yufei Guo. 2024. UP-Diff: Latent Diffusion Model for Remote Sensing Urban Prediction. arXiv preprint arXiv:2407.11578 (2024)."},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3179391"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.trb.2022.11.009"},{"key":"e_1_3_2_2_44_1","volume-title":"Spatio-temporal graph con- volutional networks: A deep learning framework for traffic forecasting. arXiv preprint arXiv:1709.04875","author":"Yu Bing","year":"2017","unstructured":"Bing Yu, Haoteng Yin, and Zhanxing Zhu. 2017. Spatio-temporal graph con- volutional networks: A deep learning framework for traffic forecasting. arXiv preprint arXiv:1709.04875 (2017)."},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/505"},{"key":"e_1_3_2_2_46_1","volume-title":"UniST: A Prompt-Empowered Universal Model for Urban Spatio-Temporal Prediction. arXiv preprint arXiv:2402.11838","author":"Yuan Yuan","year":"2024","unstructured":"Yuan Yuan, Jingtao Ding, Jie Feng, Depeng Jin, and Yong Li. 2024. UniST: A Prompt-Empowered Universal Model for Urban Spatio-Temporal Prediction. arXiv preprint arXiv:2402.11838 (2024).47] Chengyang Zhang, Yong Zhang, Qitan Shao, Bo Li, Yisheng Lv, Xinglin Piao, and Baocai Yin. 2023. 
ChatTraffic: Text-to-Traffic Generation via Diffusion Model. arXiv preprint arXiv:2311.16203 (2023)."},{"key":"e_1_3_2_2_47_1","first-page":"2827","volume-title":"Proceedings of the Twenty-Eighth Interna- tional Joint Conference on Artificial Intelligence (IJCAI-19)","author":"Zhang Junbo","year":"2019","unstructured":"Junbo Zhang, Yu Zheng, and Dong Qi. 2019. Cross-City Transfer Learning for Deep Spatio-Temporal Prediction. In Proceedings of the Twenty-Eighth Interna- tional Joint Conference on Artificial Intelligence (IJCAI-19). 2827-2833."},{"key":"e_1_3_2_2_48_1","volume-title":"Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI-22)","author":"Zhang Junbo","year":"2022","unstructured":"Junbo Zhang, Yu Zheng, and Dong Qi. 2022. When Transfer Learning Meets Cross- City Urban Flow Prediction: A Deep Spatio-Temporal Network with Attention Mechanism. In Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence (IJCAI-22). 2030-2036."},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM51629.2021.00102"},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"crossref","unstructured":"Yingxue Zhang Yanhua Li Xun Zhou Xiangnan Kong and Jun Luo. 2019. Traf- ficGAN: Off-Deployment Traffic Estimation with Traffic Generative Adversarial Networks. In ICDM.","DOI":"10.1109\/ICDM.2019.00193"},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2019.00193"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403127"},{"key":"e_1_3_2_2_53_1","volume-title":"STrans- GAN: Spatially-Transferable Generative Adversarial Networks for Urban Traffic Estimation. In 2022 IEEE International Conference on Data Mining (ICDM). IEEE, 743-752","author":"Zhang Yingxue","year":"2022","unstructured":"Yingxue Zhang, Yanhua Li, Xun Zhou, Xiangnan Kong, and Jun Luo. 2022. 
STrans- GAN: Spatially-Transferable Generative Adversarial Networks for Urban Traffic Estimation. In 2022 IEEE International Conference on Data Mining (ICDM). IEEE, 743-752."},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","unstructured":"Yingxue Zhang Yanhua Li Xun Zhou Zhenming Liu and Jun Luo. 2021. C3-GAN: Complex-Condition-Controlled Urban Traffic Estima- tion through Generative Adversarial Networks. In 2021 IEEE International Conference on Data Mining (ICDM). 1505-1510. https:\/\/doi.org\/10.1109\/ICDM51629.2021.00196","DOI":"10.1109\/ICDM51629.2021.00196"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM50108.2020.00187"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM54844.2022.00084"},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i01.5477"},{"key":"e_1_3_2_2_58_1","article-title":"SAMSGL: Series- aligned multi-scale graph learning for spatiotemporal forecasting. Chaos","volume":"34","author":"Zou Xiaobei","year":"2024","unstructured":"Xiaobei Zou, Luolin Xiong, Yang Tang, and J\u00fcrgen Kurths. 2024. SAMSGL: Series- aligned multi-scale graph learning for spatiotemporal forecasting. 
Chaos: An Interdisciplinary Journal of Nonlinear Science 34, 6 (2024).","journal-title":"Chaos: An Interdisciplinary Journal of Nonlinear Science"}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Toronto ON Canada","acronym":"KDD '25"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3737177","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:44:27Z","timestamp":1755355467000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3737177"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":58,"alternative-id":["10.1145\/3711896.3737177","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3737177","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}