{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T18:11:28Z","timestamp":1777918288756,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":51,"publisher":"ACM","funder":[{"name":"National Key R&D Program of China","award":["2023YFF0725001"],"award-info":[{"award-number":["2023YFF0725001"]}]},{"DOI":"10.13039\/501100006374","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["92370204"],"award-info":[{"award-number":["92370204"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Guangdong Basic and Applied Basic Research Foundation","award":["2023B1515120057"],"award-info":[{"award-number":["2023B1515120057"]}]},{"name":"Education Bureau of Guangzhou Municipality"},{"name":"Baidu Scholarship"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3737171","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T21:07:39Z","timestamp":1754255259000},"page":"3831-3842","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Unleashing The Power of Pre-Trained Language Models for Irregularly Sampled Time Series"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5085-5216","authenticated-orcid":false,"given":"Weijia","family":"Zhang","sequence":"first","affiliation":[{"name":"HKUST(GZ), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-1877-0542","authenticated-orcid":false,"given":"Chenlong","family":"Yin","sequence":"additional","affiliation":[{"name":"HKUST(GZ), Guangzhou, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4271-1567","authenticated-orcid":false,"given":"Hao","family":"Liu","sequence":"additional","affiliation":[{"name":"HKUST(GZ) & HKUST, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6016-6465","authenticated-orcid":false,"given":"Hui","family":"Xiong","sequence":"additional","affiliation":[{"name":"HKUST(GZ) & HKUST, Guangzhou, China"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"crossref","unstructured":"L. Shen H. L. Li-Wei M. Feng M. Ghassemi B. Moody P. Szolovits L. A. Celi A. Johnson T. Pollard and R. G. Mark. 2016. MIMIC-III a freely accessible critical care database. Scientific data 3(1):1-9(2016).","DOI":"10.1038\/sdata.2016.35"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3097983.3097997"},{"key":"e_1_3_2_2_3_1","volume-title":"Tim Januschowski, and Stephan G\u00fcnnemann.","author":"Bilo\u0161 Marin","year":"2021","unstructured":"Marin Bilo\u0161, Johanna Sommer, Syama Sundar Rangapuram, Tim Januschowski, and Stephan G\u00fcnnemann. 2021. Neural flows: Efficient alternative to neural ODEs. Advances in neural information processing systems, Vol. 34 (2021), 21325-21337."},{"key":"e_1_3_2_2_4_1","volume-title":"TEMPO: Prompt-based Generative Pre-trained Transformer for Time Series Forecasting. In International Conference on Learning Representations.","author":"Cao Defu","year":"2024","unstructured":"Defu Cao, Furong Jia, Sercan O Arik, Tomas Pfister, Yixiang Zheng, Wen Ye, and Yan Liu. 2024. TEMPO: Prompt-based Generative Pre-trained Transformer for Time Series Forecasting. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_5_1","volume-title":"Recurrent neural networks for multivariate time series with missing values. 
Scientific reports","author":"Che Zhengping","year":"2018","unstructured":"Zhengping Che, Sanjay Purushotham, Kyunghyun Cho, David Sontag, and Yan Liu. 2018. Recurrent neural networks for multivariate time series with missing values. Scientific reports, Vol. 8, 1 (2018), 6085."},{"key":"e_1_3_2_2_6_1","volume-title":"Neural ordinary differential equations. Advances in neural information processing systems","author":"Chen Ricky TQ","year":"2018","unstructured":"Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. 2018. Neural ordinary differential equations. Advances in neural information processing systems, Vol. 31 (2018)."},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"crossref","unstructured":"Zhikai Chen Haitao Mao Hang Li Wei Jin Hongzhi Wen Xiaochi Wei Shuaiqiang Wang Dawei Yin Wenqi Fan Hui Liu et al. 2024. Exploring the potential of large language models (llms) in learning on graphs. ACM SIGKDD Explorations Newsletter(2024) 42-61.","DOI":"10.1145\/3655103.3655110"},{"key":"e_1_3_2_2_8_1","volume-title":"GRU-ODE-Bayes: Continuous modeling of sporadically-observed time series. Advances in neural information processing systems","author":"Brouwer Edward De","year":"2019","unstructured":"Edward De Brouwer, Jaak Simm, Adam Arany, and Yves Moreau. 2019. GRU-ODE-Bayes: Continuous modeling of sporadically-observed time series. Advances in neural information processing systems, Vol. 32 (2019)."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"crossref","unstructured":"Robert F Engle and Jeffrey R Russell. 1998. Autoregressive conditional duration: a new model for irregularly spaced transaction data. Econometrica(1998) 1127-1162.","DOI":"10.2307\/2999632"},{"key":"e_1_3_2_2_10_1","first-page":"19622","article-title":"Large language models are zero-shot time series forecasters","volume":"36","author":"Gruver Nate","year":"2023","unstructured":"Nate Gruver, Marc Finzi, Shikai Qiu, and Andrew G Wilson. 2023. 
Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems, Vol. 36 (2023), 19622-19635.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_11_1","volume-title":"International Conference on Machine Learning. PMLR, 4353-4363","author":"Horn Max","year":"2020","unstructured":"Max Horn, Michael Moor, Christian Bock, Bastian Rieck, and Karsten Borgwardt. 2020. Set functions for time series. In International Conference on Machine Learning. PMLR, 4353-4363."},{"key":"e_1_3_2_2_12_1","volume-title":"George Moody and Roger Mark","author":"Leo Celi Ikaro Silva Daniel Scott","year":"2012","unstructured":"Daniel Scott Leo Celi Ikaro Silva, George Moody and Roger Mark. 2012. Predicting in-hospital mortality of icu patients: The physionet computing in cardiology challenge 2012. Computing in cardiology, 39:245-248(2012)."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467419"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i7.25969"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3512030"},{"key":"e_1_3_2_2_16_1","volume-title":"International Conference on Learning Representations.","author":"Jin Ming","year":"2024","unstructured":"Ming Jin, Shiyu Wang, Lintao Ma, Zhixuan Chu, James Y Zhang, Xiaoming Shi, Pin-Yu Chen, Yuxuan Liang, Yuan-Fang Li, Shirui Pan, et al., 2024a. Time-LLM: Time Series Forecasting by Reprogramming Large Language Models. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_17_1","volume-title":"International Conference on Machine Learning.","author":"Jin Ming","year":"2024","unstructured":"Ming Jin, Yifan Zhang, Wei Chen, Kexin Zhang, Yuxuan Liang, Bin Yang, Jindong Wang, Shirui Pan, and Qingsong Wen. 2024b. Position: What Can Large Language Models Tell Us about Time Series Analysis. 
In International Conference on Machine Learning."},{"key":"e_1_3_2_2_18_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2139"},{"key":"e_1_3_2_2_20_1","unstructured":"Zachary C Lipton David Kale and Randall Wetzel. 2016. Directly modeling missing data in sequences with rnns: Improved classification of clinical time series. In Machine learning for healthcare conference. 253-270."},{"key":"e_1_3_2_2_21_1","first-page":"122154","article-title":"Autotimes: Autoregressive time series forecasters via large language models","volume":"37","author":"Liu Yong","year":"2024","unstructured":"Yong Liu, Guo Qin, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. 2024. Autotimes: Autoregressive time series forecasters via large language models. Advances in Neural Information Processing Systems, Vol. 37 (2024), 122154-122184.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3690624.3709323"},{"key":"e_1_3_2_2_23_1","unstructured":"Williams Jr C. Menne M. and R. Vose. [n.d.]. Long-term daily climate records from stations across the contiguous united states. ( [n. d.])."},{"key":"e_1_3_2_2_24_1","volume-title":"Thien Huu Nguyen, Oscar Sainz, Eneko Agirre, Ilana Heintz, and Dan Roth.","author":"Min Bonan","year":"2023","unstructured":"Bonan Min, Hayley Ross, Elior Sulem, Amir Pouran Ben Veyseh, Thien Huu Nguyen, Oscar Sainz, Eneko Agirre, Ilana Heintz, and Dan Roth. 2023. Recent advances in natural language processing via large pre-trained language models: A survey. Comput. Surveys(2023), 1-40."},{"key":"e_1_3_2_2_25_1","volume-title":"Proceedings of the 30th International Conference on Neural Information Processing Systems. 3889-3897","author":"Neil Daniel","year":"2016","unstructured":"Daniel Neil, Michael Pfeiffer, and Shih-Chii Liu. 2016. 
Phased LSTM: accelerating recurrent network training for long or event-based sequences. In Proceedings of the 30th International Conference on Neural Information Processing Systems. 3889-3897."},{"key":"e_1_3_2_2_26_1","volume-title":"International Conference on Machine Learning.","author":"Pan Zijie","year":"2024","unstructured":"Zijie Pan, Yushan Jiang, Sahil Garg, Anderson Schneider, Yuriy Nevmyvaka, and Dongjin Song. 2024. S2IP-LLM: Semantic Space Informed Prompt Learning with LLM for Time Series Forecasting. In International Conference on Machine Learning."},{"key":"e_1_3_2_2_27_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog Vol. 1 8 (2019) 9."},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.2012.13"},{"key":"e_1_3_2_2_29_1","volume-title":"Early Prediction of Sepsis from Clinical Data: the PhysioNet\/Computing in Cardiology Challenge","author":"Reyna Matthew A","year":"2019","unstructured":"Matthew A Reyna, Chris Josef, Salman Seyedi, Russell Jeter, Supreeth P Shashikumar, M Brandon Westover, Ashish Sharma, Shamim Nemati, and Gari D Clifford. 2019. Early Prediction of Sepsis from Clinical Data: the PhysioNet\/Computing in Cardiology Challenge 2019. In Computing in Cardiology. IEEE, 1."},{"key":"e_1_3_2_2_30_1","volume-title":"Ricky TQ Chen, and David K Duvenaud","author":"Rubanova Yulia","year":"2019","unstructured":"Yulia Rubanova, Ricky TQ Chen, and David K Duvenaud. 2019. Latent ordinary differential equations for irregularly-sampled time series. Advances in neural information processing systems, Vol. 32 (2019)."},{"key":"e_1_3_2_2_31_1","volume-title":"International Conference on Machine Learning. PMLR","author":"Schirmer Mona","year":"2022","unstructured":"Mona Schirmer, Mazin Eltayeb, Stefan Lessmann, and Maja Rudolph. 2022. Modeling irregular time series with continuous recurrent units. 
In International Conference on Machine Learning. PMLR, 19388-19405."},{"key":"e_1_3_2_2_32_1","volume-title":"Interpolation-Prediction Networks for Irregularly Sampled Time Series. In International Conference on Learning Representations.","author":"Shukla Satya Narayan","year":"2018","unstructured":"Satya Narayan Shukla and Benjamin Marlin. 2018. Interpolation-Prediction Networks for Irregularly Sampled Time Series. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_33_1","volume-title":"Multi-Time Attention Networks for Irregularly Sampled Time Series. In International Conference on Learning Representations.","author":"Shukla Satya Narayan","year":"2021","unstructured":"Satya Narayan Shukla and Benjamin Marlin. 2021. Multi-Time Attention Networks for Irregularly Sampled Time Series. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_34_1","unstructured":"Satya Narayan Shukla and Benjamin M Marlin. 2020. A survey on principles models and methods for learning from irregularly sampled time series. arXiv preprint arXiv:2012.00168(2020)."},{"key":"e_1_3_2_2_35_1","volume-title":"International Conference on Learning Representations.","author":"Sun Chenxi","year":"2024","unstructured":"Chenxi Sun, Hongyan Li, Yaliang Li, and Shenda Hong. 2024. TEST: Text Prototype Aligned Embedding to Activate LLM's Ability for Time Series. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_36_1","volume-title":"Attention is all you need. Advances in neural information processing systems","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, Vol. 
30 (2017)."},{"key":"e_1_3_2_2_37_1","volume-title":"Vedrana and Jana Krivec","author":"Kaluza Bostjan Piltaver Rok Lustrek Mitja","year":"2010","unstructured":"Lustrek Mitja Kaluza Bostjan Piltaver Rok Vidulin, Vedrana and Jana Krivec. 2010. Localization Data for Person Activity. UCI Machine Learning Repository."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ascom.2012.12.001"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i12.33384"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2025\/374"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i1.16145"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403118"},{"key":"e_1_3_2_2_43_1","volume-title":"Promptcast: A new prompt-based learning paradigm for time series forecasting","author":"Xue Hao","year":"2023","unstructured":"Hao Xue and Flora D Salim. 2023. Promptcast: A new prompt-based learning paradigm for time series forecasting. IEEE Transactions on Knowledge and Data Engineering(2023)."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599543"},{"key":"e_1_3_2_2_46_1","volume-title":"Irregular Multivariate Time Series Forecasting: A Transformable Patching Graph Neural Networks Approach. In International Conference on Machine Learning.","author":"Zhang Weijia","year":"2024","unstructured":"Weijia Zhang, Chenlong Yin, Hao Liu, Xiaofang Zhou, and Hui Xiong. 2024a. Irregular Multivariate Time Series Forecasting: A Transformable Patching Graph Neural Networks Approach. In International Conference on Machine Learning."},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671665"},{"key":"e_1_3_2_2_48_1","volume-title":"Graph-Guided Network for Irregularly Sampled Multivariate Time Series. 
In International Conference on Learning Representations.","author":"Zhang Xiang","year":"2021","unstructured":"Xiang Zhang, Marko Zeman, Theodoros Tsiligkaridis, and Marinka Zitnik. 2021. Graph-Guided Network for Irregularly Sampled Multivariate Time Series. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_49_1","unstructured":"Ce Zhou Qian Li Chen Li Jun Yu Yixin Liu Guangjing Wang Kai Zhang Cheng Ji Qiben Yan Lifang He et al. 2024. A comprehensive survey on pretrained foundation models: A history from bert to chatgpt. International Journal of Machine Learning and Cybernetics(2024) 1-65."},{"key":"e_1_3_2_2_50_1","unstructured":"Tian Zhou Peisong Niu Liang Sun Rong Jin et al. 2023a. One fits all: Power general time series analysis by pretrained lm. Advances in neural information processing systems(2023)."},{"key":"e_1_3_2_2_51_1","unstructured":"Tian Zhou Pei-Song Niu Xue Wang Liang Sun and Rong Jin. 2023b. One Fits All: Universal Time Series Analysis by Pretrained LM and Specially Designed Adaptors. 
arXiv preprint arXiv:2311.14782(2023)."}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Toronto ON Canada","acronym":"KDD '25","sponsor":["SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGMOD ACM Special Interest Group on Management of Data"]},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3737171","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,30]],"date-time":"2026-04-30T18:04:47Z","timestamp":1777572287000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3737171"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":51,"alternative-id":["10.1145\/3711896.3737171","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3737171","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}