{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,7]],"date-time":"2026-01-07T07:55:58Z","timestamp":1767772558481,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":33,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,7,20]],"date-time":"2025-07-20T00:00:00Z","timestamp":1752969600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-sa\/4.0\/"}],"funder":[{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"publisher","award":["RS-2024-00413582"],"award-info":[{"award-number":["RS-2024-00413582"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,20]]},"DOI":"10.1145\/3690624.3709254","type":"proceedings-article","created":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T18:44:43Z","timestamp":1743792283000},"page":"1209-1220","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["ST-MTM: Masked Time Series Modeling with Seasonal-Trend Decomposition for Time Series Forecasting"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2336-2568","authenticated-orcid":false,"given":"Hyunwoo","family":"Seo","sequence":"first","affiliation":[{"name":"Ulsan National Institute of Science and Technology, Ulsan, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6112-9674","authenticated-orcid":false,"given":"Chiehyeon","family":"Lim","sequence":"additional","affiliation":[{"name":"Ulsan National Institute of Science and Technology, Ulsan, Republic of Korea"}]}],"member":"320","published-online":{"date-parts":[[2025,7,20]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"International Conference on Machine Learning. PMLR, 1298--1312","author":"Baevski Alexei","year":"2022","unstructured":"Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. 2022. Data2vec: A general framework for self-supervised learning in speech, vision and language. In International Conference on Machine Learning. PMLR, 1298--1312."},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/MLSP.2019.8918693"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1111\/tgis.12644"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_9"},{"key":"e_1_3_2_2_5_1","volume-title":"International conference on machine learning. PMLR, 1597--1607","author":"Chen Ting","year":"2020","unstructured":"Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A simple framework for contrastive learning of visual representations. In International conference on machine learning. PMLR, 1597--1607."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539329"},{"key":"e_1_3_2_2_7_1","first-page":"3","article-title":"STL: A seasonal-trend decomposition","volume":"6","author":"Cleveland Robert B","year":"1990","unstructured":"Robert B Cleveland, William S Cleveland, Jean E McRae, and Irma Terpenning. 1990. STL: A seasonal-trend decomposition. J. Off. Stat, Vol. 6, 1 (1990), 3--73.","journal-title":"J. Off. Stat"},{"key":"e_1_3_2_2_8_1","volume-title":"Disentangling Structured Components: Towards Adaptive, Interpretable and Scalable Time Series Forecasting","author":"Deng Jinliang","year":"2024","unstructured":"Jinliang Deng, Xiusi Chen, Renhe Jiang, Du Yin, Yi Yang, Xuan Song, and Ivor W Tsang. 2024. Disentangling Structured Components: Towards Adaptive, Interpretable and Scalable Time Series Forecasting. IEEE Transactions on Knowledge and Data Engineering (2024)."},{"key":"e_1_3_2_2_9_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_2_10_1","volume-title":"TimeSiam: A Pre-Training Framework for Siamese Time-Series Modeling. In Forty-first International Conference on Machine Learning.","author":"Dong Jiaxiang","year":"2024","unstructured":"Jiaxiang Dong, Haixu Wu, Yuxuan Wang, Yunzhong Qiu, Li Zhang, Jianmin Wang, and Mingsheng Long. 2024. TimeSiam: A Pre-Training Framework for Siamese Time-Series Modeling. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_2_11_1","volume-title":"A Simple Pre-Training Framework for Masked Time-Series Modeling. Advances in Neural Information Processing Systems","author":"Dong Jiaxiang","year":"2023","unstructured":"Jiaxiang Dong, Haixu Wu, Haoran Zhang, Li Zhang, Jianmin Wang, and Mingsheng Long. 2023. SimMTM: A Simple Pre-Training Framework for Masked Time-Series Modeling. Advances in Neural Information Processing Systems (2023)."},{"key":"e_1_3_2_2_12_1","volume-title":"Xiaoli Li Kwoh, and Cuntai Guan","author":"Eldele Emadeldeen","year":"2021","unstructured":"Emadeldeen Eldele, Mohamed Ragab, Zhenghua Chen, Min Wu, Chee Keong, Xiaoli Li Kwoh, and Cuntai Guan. 2021. Time-Series Representation Learning via Temporal and Contextual Contrasting. (2021)."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1007\/s00500-020-04954-0"},{"key":"e_1_3_2_2_14_1","first-page":"14290","article-title":"Semmae: Semantic-guided masking for learning masked autoencoders","volume":"35","author":"Li Gang","year":"2022","unstructured":"Gang Li, Heliang Zheng, Daqing Liu, Chaoyue Wang, Bing Su, and Changwen Zheng. 2022. Semmae: Semantic-guided masking for learning masked autoencoders. Advances in Neural Information Processing Systems, Vol. 35 (2022), 14290--14302.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_15_1","volume-title":"Ti-MAE: Self-Supervised Masked Time Series Autoencoders. arXiv preprint arXiv:2301.08871","author":"Li Zhe","year":"2023","unstructured":"Zhe Li, Zhongwen Rao, Lujia Pan, Pengyun Wang, and Zenglin Xu. 2023. Ti-MAE: Self-Supervised Masked Time Series Autoencoders. arXiv preprint arXiv:2301.08871 (2023)."},{"key":"e_1_3_2_2_16_1","volume-title":"Timer: Generative Pre-trained Transformers Are Large Time Series Models. In Forty-first International Conference on Machine Learning.","author":"Liu Yong","year":"2024","unstructured":"Yong Liu, Haoran Zhang, Chenyu Li, Xiangdong Huang, Jianmin Wang, and Mingsheng Long. 2024. Timer: Generative Pre-trained Transformers Are Large Time Series Models. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_2_17_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Nie Yuqi","year":"2022","unstructured":"Yuqi Nie, Nam H Nguyen, Phanwadee Sinthong, and Jayant Kalagnanam. 2022. A Time Series is Worth 64 Words: Long-term Forecasting with Transformers. The Eleventh International Conference on Learning Representations."},{"volume-title":"Random variables and stochastic processes","author":"Papoulis Athanasios","key":"e_1_3_2_2_18_1","unstructured":"Athanasios Papoulis. 1991. Random variables and stochastic processes. McGraw Hill."},{"key":"e_1_3_2_2_19_1","unstructured":"Zezhi Shao Fei Wang Yongjun Xu Wei Wei Chengqing Yu Zhao Zhang Di Yao Guangyin Jin Xin Cao Gao Cong et al. 2023. Exploring progress in multivariate time series forecasting: Comprehensive benchmarking and heterogeneity analysis. arXiv preprint arXiv:2310.06119 (2023)."},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2019.105524"},{"volume-title":"Probability and random processes for electrical and computer engineers","author":"Therrien Charles","key":"e_1_3_2_2_21_1","unstructured":"Charles Therrien and Murali Tummala. 2018. Probability and random processes for electrical and computer engineers. CRC press."},{"key":"e_1_3_2_2_22_1","first-page":"38775","article-title":"Learning latent seasonal-trend representations for time series forecasting","volume":"35","author":"Wang Zhiyuan","year":"2022","unstructured":"Zhiyuan Wang, Xovee Xu, Weifeng Zhang, Goce Trajcevski, Ting Zhong, and Fan Zhou. 2022. Learning latent seasonal-trend representations for time series forecasting. Advances in Neural Information Processing Systems, Vol. 35 (2022), 38775--38787.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_23_1","volume-title":"Generalized harmonic analysis. Acta mathematica","author":"Wiener Norbert","year":"1930","unstructured":"Norbert Wiener. 1930. Generalized harmonic analysis. Acta mathematica, Vol. 55, 1 (1930), 117--258."},{"key":"e_1_3_2_2_24_1","volume-title":"CoST: Contrastive Learning of Disentangled Seasonal-Trend Representations for Time Series Forecasting. In International Conference on Learning Representations.","author":"Woo Gerald","year":"2021","unstructured":"Gerald Woo, Chenghao Liu, Doyen Sahoo, Akshat Kumar, and Steven Hoi. 2021. CoST: Contrastive Learning of Disentangled Seasonal-Trend Representations for Time Series Forecasting. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_25_1","volume-title":"Etsformer: Exponential smoothing transformers for time-series forecasting. arXiv preprint arXiv:2202.01381","author":"Woo Gerald","year":"2022","unstructured":"Gerald Woo, Chenghao Liu, Doyen Sahoo, Akshat Kumar, and Steven Hoi. 2022. Etsformer: Exponential smoothing transformers for time-series forecasting. arXiv preprint arXiv:2202.01381 (2022)."},{"key":"e_1_3_2_2_26_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Wu Haixu","year":"2022","unstructured":"Haixu Wu, Tengge Hu, Yong Liu, Hang Zhou, Jianmin Wang, and Mingsheng Long. 2022. TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_2_27_1","first-page":"22419","article-title":"Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting","volume":"34","author":"Wu Haixu","year":"2021","unstructured":"Haixu Wu, Jiehui Xu, Jianmin Wang, and Mingsheng Long. 2021. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, Vol. 34 (2021), 22419--22430.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_28_1","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems.","author":"Yi Kun","year":"2023","unstructured":"Kun Yi, Qi Zhang, Wei Fan, Shoujin Wang, Pengyang Wang, Hui He, Ning An, Defu Lian, Longbing Cao, and Zhendong Niu. 2023. Frequency-domain MLPs are More Effective Learners in Time Series Forecasting. In Thirty-seventh Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i8.20881"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26317"},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467401"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i12.17325"},{"key":"e_1_3_2_2_33_1","volume-title":"International Conference on Machine Learning. PMLR, 27268--27286","author":"Zhou Tian","year":"2022","unstructured":"Tian Zhou, Ziqing Ma, Qingsong Wen, Xue Wang, Liang Sun, and Rong Jin. 2022. Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In International Conference on Machine Learning. PMLR, 27268--27286."}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Toronto ON Canada","acronym":"KDD '25"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.1"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709254","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3690624.3709254","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T15:42:06Z","timestamp":1755358926000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3690624.3709254"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,20]]},"references-count":33,"alternative-id":["10.1145\/3690624.3709254","10.1145\/3690624"],"URL":"https:\/\/doi.org\/10.1145\/3690624.3709254","relation":{},"subject":[],"published":{"date-parts":[[2025,7,20]]},"assertion":[{"value":"2025-07-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}