{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T09:22:27Z","timestamp":1754558547773,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":40,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819757787"},{"type":"electronic","value":"9789819757794"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-97-5779-4_10","type":"book-chapter","created":{"date-parts":[[2025,1,10]],"date-time":"2025-01-10T07:16:58Z","timestamp":1736493418000},"page":"147-162","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["PT-Tuning: Bridging the Gap between Time Series Masked Reconstruction and Forecasting via Prompt Token Tuning"],"prefix":"10.1007","author":[{"given":"Hao","family":"Liu","sequence":"first","affiliation":[]},{"given":"Jinrui","family":"Gan","sequence":"additional","affiliation":[]},{"given":"Xiaoxuan","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chuanxian","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Jing","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Guangxin","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Yucheng","family":"Qian","sequence":"additional","affiliation":[]},{"given":"Changwei","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Huan","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Zhenyu","family":"Guo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,11]]},"reference":[{"unstructured":"Bahng, H., Jahanian, A., Sankaranarayanan, S., Isola, P.: Exploring visual prompts for adapting large-scale models. arXiv preprint:2203.17274 (2022)","key":"10_CR1"},{"unstructured":"Bao, H., Dong, L., Piao, S., Wei, F.: Beit: Bert pre-training of image transformers. In: ICLR (2022)","key":"10_CR2"},{"unstructured":"Brown, T.B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., Agarwal, S., Herbert-Voss, A., Krueger, G., Henighan, T., Child, R., Ramesh, A., Ziegler, D.M., Wu, J., Winter, C., Hesse, C., Chen, M., Sigler, E., Litwin, M., Gray, S., Chess, B., Clark, J., Berner, C., McCandlish, S., Radford, A., Sutskever, I., Amodei, D.: Language models are few-shot learners. arXiv preprint:2005.14165 (2020)","key":"10_CR3"},{"unstructured":"Challu, C., Olivares, K.G., Oreshkin, B.N., Garza, F., Mergenthaler, M., Dubrawski, A.: N-hits: Neural hierarchical interpolation for time series forecasting. arXiv preprint:2201.12886 (2022)","key":"10_CR4"},{"unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. 
In: NAACL (2019)","key":"10_CR5"},{"unstructured":"Dong, J., Wu, H., Zhang, H., Zhang, L., Wang, J., Long, M.: Simmtm: A simple pre-training framework for masked time-series modeling. arXiv preprint:2302.00861 (2023)","key":"10_CR6"},{"doi-asserted-by":"crossref","unstructured":"Eldele, E., Ragab, M., Chen, Z., Wu, M., Kwoh, C.K., Li, X., Guan, C.: Time-series representation learning via temporal and contextual contrasting. In: IJCAI. pp. 2352\u20132359 (2021)","key":"10_CR7","DOI":"10.24963\/ijcai.2021\/324"},{"unstructured":"Gu, A., Goel, K., R\u00e9, C.: Efficiently modeling long sequences with structured state spaces. In: ICLR (2022)","key":"10_CR8"},{"doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Dollar, P., Girshick, R.: Masked autoencoder are scalable vision learners. In: CVPR. pp. 15979\u201315988 (2022)","key":"10_CR9","DOI":"10.1109\/CVPR52688.2022.01553"},{"doi-asserted-by":"crossref","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Computation 9(8), 1735\u20131780 (1997)","key":"10_CR10","DOI":"10.1162\/neco.1997.9.8.1735"},{"doi-asserted-by":"crossref","unstructured":"Jia, M., Tang, L., Chen, B.c., Cardie, C., Belongie, S., Hariharan, B., Lim, S.N.: Visual prompt tuning. In: ECCV (2022)","key":"10_CR11","DOI":"10.1007\/978-3-031-19827-4_41"},{"unstructured":"Kim, T., Kim, J., Tae, Y., Park, C., Choi, J.H., Choo, J.: Reversible instance normalization for accurate time-series forecasting against distribution shift. In: ICLR (2022)","key":"10_CR12"},{"doi-asserted-by":"crossref","unstructured":"Lai, G., Chang, W.C., Yang, Y., Liu, H.: Modeling long and short-term temporal patterns with deep neural networks. In: SIGIR. pp. 95\u2013104 (2018)","key":"10_CR13","DOI":"10.1145\/3209978.3210006"},{"doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: EMNLP. pp. 3045\u20133059 (2021)","key":"10_CR14","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint:2101.00190 (2021)","key":"10_CR15"},{"unstructured":"Li, Z., Rao, Z., Pan, L., Wang, P., Xu, Z.: Ti-mae: Self-supervised masked time series autoencoders. arXiv preprint:2301.08871 (2023)","key":"10_CR16"},{"doi-asserted-by":"crossref","unstructured":"Liu, X., Ji, K., Fu, Y., Tam, W., Du, Z., Yang, Z., Tang, J.: P-tuning: Prompt tuning can be comparable to fine-tuning across scales and tasks. In: ACL. pp. 61\u201368 (2022)","key":"10_CR17","DOI":"10.18653\/v1\/2022.acl-short.8"},{"unstructured":"Liu, Y., Wu, H., Wang, J., Long, M.: Non-stationary transformers: Exploring the stationarity in time series forecasting. In: NeurIPS. p. 9881\u20139893 (2022)","key":"10_CR18"},{"unstructured":"Ma, M.: Time series generation with masked autoencoder. arXiv preprint:2201.07006 (2022)","key":"10_CR19"},{"unstructured":"van\u00a0der Maaten, L., Hinton, G.: Visualizing data using t-sne. Journal of Machine Learning Research 9(11), 2579\u20132605 (2008)","key":"10_CR20"},{"doi-asserted-by":"crossref","unstructured":"Meng, Q., Qian, H., Liu, Y., Cui, L., Xu, Y., Shen, Z.: Mhccl: Masked hierarchical cluster-wise contrastive learning for multivariate time series. In: AAAI. p. 9153\u20139161 (2023)","key":"10_CR21","DOI":"10.1609\/aaai.v37i8.26098"},{"unstructured":"Nie, Y., Nguyen, N.H., Sinthong, P., Kalagnanam, J.: A time series is worth 64 words: Long-term forecasting with transformers. 
In: ICLR (2023)","key":"10_CR22"},{"doi-asserted-by":"crossref","unstructured":"Shao, Z., Zhang, Z., Wang, F., Xu, Y.: Pre-training enhanced spatial-temporal graph neural network for multivariate time series forecasting. In: KDD. p. 1567\u20131577 (2022)","key":"10_CR23","DOI":"10.1145\/3534678.3539396"},{"doi-asserted-by":"crossref","unstructured":"Su, Y., Wang, X., Qin, Y., Chan, C.M., Lin, Y., Wang, H., Wen, K., Liu, Z., Li, P., Li, J.: On transferability of prompt tuning for natural language processing. In: NAACL. pp. 3949\u20133969 (2022)","key":"10_CR24","DOI":"10.18653\/v1\/2022.naacl-main.290"},{"doi-asserted-by":"crossref","unstructured":"Vu, T., Lester, B., Constant, N., AI-Rfou, R., Cer, D.: Spot: Better frozen model adaptation through soft prompt transfer. In: ACL. pp. 5039\u20135059 (2022)","key":"10_CR25","DOI":"10.18653\/v1\/2022.acl-long.346"},{"unstructured":"Wang, H., Peng, J., Huang, F., Wang, J., Chen, J., Xiao, Y.: Micn: Multi-scale local and global context modeling for long-term series forecasting. In: ICLR (2023)","key":"10_CR26"},{"unstructured":"Wang, Z., Xu, X., Zhang, W., Trajcevski, G., Zhong, T., Zhou, F.: Learning latent seasonal-trend representations for time series forecasting. In: NeurIPS. p. 38775\u201338787 (2022)","key":"10_CR27"},{"unstructured":"Woo, G., Liu, C., Sahoo, D., Kumar, A., Hoi, S.: Cost: Contrastive learning of disentangled seasonal-trend representations for time series forecasting. In: ICLR (2022)","key":"10_CR28"},{"unstructured":"Wu, H., Hu, T., Liu, Y., Zhou, H., Wang, J., Long, M.: Timesnet: Temporal 2d-variation modeling for general time series analysis. In: ICLR (2023)","key":"10_CR29"},{"unstructured":"Wu, H., Xu, J., Wang, J., Long, M.: Autoformer: Decomposition transformers with Auto-Correlation for long-term series forecasting. In: NeurIPS (2021)","key":"10_CR30"},{"unstructured":"Xie, Z., Geng, Z., Hu, J., Zhang, Z., Hu, H., Cao, Y.: Revealing the dark secrets of masked image modeling. arXiv preprint:2205.13543 (2022)","key":"10_CR31"},{"unstructured":"Yang, L., Hong, S.: Unsupervised time-series representation learning with iterative bilinear temporal-spectral fusion. In: ICML. p. 25038\u201325054 (2022)","key":"10_CR32"},{"doi-asserted-by":"crossref","unstructured":"Yue, Z., Wang, Y., Duan, J., Yang, T., Huang, C., Tong, Y., Xu, B.: Ts2vec: Towards universal representation of time series. In: AAAI. pp. 8980\u20138987 (2022)","key":"10_CR33","DOI":"10.1609\/aaai.v36i8.20881"},{"doi-asserted-by":"crossref","unstructured":"Zeng, A., Chen, M., Zhang, L., Xu, Q.: Are transformers effective for time series forecasting? In: AAAI. p. 11121\u201311128 (2023)","key":"10_CR34","DOI":"10.1609\/aaai.v37i9.26317"},{"doi-asserted-by":"crossref","unstructured":"Zerveas, G., Jayaraman, S., Patel, D., Bhamidipaty, A., Eickhoff, C.: A transformer-based framework for multivariate time series representation learning. In: KDD. pp. 2114\u20132124 (2021)","key":"10_CR35","DOI":"10.1145\/3447548.3467401"},{"unstructured":"Zhang, T., Zhang, Y., Cao, W., Bian, J., Yi, X., Zheng, S., Li, J.: Less is more: Fast multivariate time series forecasting with light sampling-oriented mlp structures. arXiv preprint:2207.01186, (2022)","key":"10_CR36"},{"unstructured":"Zhang, X., Zhao, Z., Tsiligkaridis, T., Zitnik, M.: Self-supervised contrastive pre-training for time series via time-frequency consistency. In: NeurIPS. p. 
3988\u20134003 (2022)","key":"10_CR37"},{"unstructured":"Zhang, Y., Yan, J.: Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In: ICLR (2023)","key":"10_CR38"},{"doi-asserted-by":"crossref","unstructured":"Zhou, H., Zhang, S., Peng, J., Zhang, S., Li, J., Xiong, H., Zhang, W.: Informer: Beyond efficient transformer for long sequence time-series forecasting. In: AAAI. pp. 11106\u201311115 (2021)","key":"10_CR39","DOI":"10.1609\/aaai.v35i12.17325"},{"unstructured":"Zhou, T., Ma, Z., Wen, Q., Wang, X., Sun, L., Jin, R.: Fedformer: Frequency enhanced decomposed transformer for long-term series forecasting. In: ICML (2022)","key":"10_CR40"}],"container-title":["Lecture Notes in Computer Science","Database Systems for Advanced Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-5779-4_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,10]],"date-time":"2025-01-10T08:06:07Z","timestamp":1736496367000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-5779-4_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819757787","9789819757794"],"references-count":40,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-5779-4_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"11 January 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DASFAA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Database Systems for Advanced Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Gifu","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 July 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 July 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"dasfaa2024a","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.dasfaa2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}