{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:34:20Z","timestamp":1775579660852,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":13,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T00:00:00Z","timestamp":1746662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Natural Science Foundation of China","award":["62472405"],"award-info":[{"award-number":["62472405"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,5,8]]},"DOI":"10.1145\/3701716.3715293","type":"proceedings-article","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T16:09:41Z","timestamp":1748016581000},"page":"749-752","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":7,"title":["STBench: Assessing the Ability of Large Language Models in Spatio-Temporal Analysis"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3258-1116","authenticated-orcid":false,"given":"Wenbin","family":"Li","sequence":"first","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China and University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1778-8319","authenticated-orcid":false,"given":"Di","family":"Yao","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-2082-8867","authenticated-orcid":false,"given":"Ruibo","family":"Zhao","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-2699-8018","authenticated-orcid":false,"given":"Wenjie","family":"Chen","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-2435-6907","authenticated-orcid":false,"given":"Zijie","family":"Xu","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-1181-6972","authenticated-orcid":false,"given":"Chengxue","family":"Luo","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7941-5011","authenticated-orcid":false,"given":"Chang","family":"Gong","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7670-7729","authenticated-orcid":false,"given":"Quanliang","family":"Jing","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1275-5972","authenticated-orcid":false,"given":"Haining","family":"Tan","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9372-9410","authenticated-orcid":false,"given":"Jingping","family":"Bi","sequence":"additional","affiliation":[{"name":"Institute of Computing Technology, Chinese Academy of Sciences, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,5,23]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Language Models: A Crucial Step Towards Equitable Representations. CoRR","author":"D\u00e9coupes R\u00e9my","year":"2024","unstructured":"R\u00e9my D\u00e9coupes, Roberto Interdonato, Mathieu Roche, et al. 2024. Evaluation of Geographical Distortions in Language Models: A Crucial Step Towards Equitable Representations. CoRR, Vol. abs\/2404.17401 (2024)."},{"key":"e_1_3_2_2_2_1","unstructured":"Jie Feng Jun Zhang Junbo Yan et al. 2024. CityBench: Evaluating the Capabilities of Large Language Model as World Model. CoRR Vol. abs\/2406.13945 (2024)."},{"key":"e_1_3_2_2_3_1","volume-title":"Language Models Represent Space and Time. In ICLR","author":"Gurnee Wes","year":"2024","unstructured":"Wes Gurnee and Max Tegmark. 2024. Language Models Represent Space and Time. In ICLR 2024."},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29811"},{"key":"e_1_3_2_2_5_1","unstructured":"Wenbin Li Di Yao Ruibo Zhao et al. 2024c. STBench: Assessing the Ability of Large Language Models in Spatio-Temporal Analysis. CoRR Vol. abs\/2406.19065 (2024)."},{"key":"e_1_3_2_2_6_1","first-page":"5351","article-title":"UrbanGPT","volume":"2024","author":"Li Zhonghang","year":"2024","unstructured":"Zhonghang Li, Lianghao Xia, Jiabin Tang, et al. 2024b. UrbanGPT: Spatio-Temporal Large Language Models. In KDD 2024. 5351--5362.","journal-title":"Spatio-Temporal Large Language Models. In KDD"},{"key":"e_1_3_2_2_7_1","volume-title":"NeurIPS","author":"Li Zhonghang","year":"2023","unstructured":"Zhonghang Li, Lianghao Xia, Yong Xu, and Chao Huang. 2023. GPT-ST: Generative Pre-Training of Spatio-Temporal Graph Neural Networks. In NeurIPS 2023."},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3653070"},{"key":"e_1_3_2_2_9_1","volume-title":"Towards Understanding the Spatial Literacy of ChatGPT. In SIGSPATIAL","author":"PETER MOONEY, WENCONG CUI, BOYUAN GUAN","year":"2023","unstructured":"PETER MOONEY, WENCONG CUI, BOYUAN GUAN, et al. 2023. Towards Understanding the Spatial Literacy of ChatGPT. In SIGSPATIAL 2023."},{"key":"e_1_3_2_2_10_1","volume-title":"ICLR","author":"Nie Yuqi","year":"2023","unstructured":"Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, et al. 2023. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. In ICLR 2023."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557702"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21383"},{"key":"e_1_3_2_2_13_1","article-title":"Emergent Abilities of Large Language","volume":"2022","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Yi Tay, Rishi Bommasani, et al. 2022. Emergent Abilities of Large Language Models. Trans. Mach. Learn. Res., Vol. 2022 (2022).","journal-title":"Models. Trans. Mach. Learn. Res."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3715293","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3701716.3715293","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T02:02:38Z","timestamp":1759888958000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3715293"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":13,"alternative-id":["10.1145\/3701716.3715293","10.1145\/3701716"],"URL":"https:\/\/doi.org\/10.1145\/3701716.3715293","relation":{},"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"2025-05-23","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}