{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T08:00:21Z","timestamp":1773388821838,"version":"3.50.1"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100010418","name":"Institute for Information & Communications Technology Promotion","doi-asserted-by":"crossref","id":[{"id":"10.13039\/501100010418","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Korean Government [Ministry of Science and ICT (MSIT)], Artificial Intelligence Graduate School Program","award":["RS-2019-II190075"],"award-info":[{"award-number":["RS-2019-II190075"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3475471","type":"journal-article","created":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T17:46:18Z","timestamp":1728323178000},"page":"170387-170398","source":"Crossref","is-referenced-by-count":5,"title":["Time Series Classification With Large Language Models via Linguistic Scaffolding"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-9934-714X","authenticated-orcid":false,"given":"Hyeongwon","family":"Jang","sequence":"first","affiliation":[{"name":"Department of Mathematical Sciences, Seoul National University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8799-889X","authenticated-orcid":false,"given":"June","family":"Yong Yang","sequence":"additional","affiliation":[{"name":"Kim Jaechul Graduate School of Artificial Intelligence, KAIST, Yuseong-gu, Daejeon, Republic of 
Korea"}]},{"given":"Jaeryong","family":"Hwang","sequence":"additional","affiliation":[{"name":"Department of Cyber Science, Republic of Korea Naval Academy, Jinhae-gu, Changwon-si, Republic of Korea"}]},{"given":"Eunho","family":"Yang","sequence":"additional","affiliation":[{"name":"Kim Jaechul Graduate School of Artificial Intelligence, KAIST, Yuseong-gu, Daejeon, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NIPS","author":"Brown"},{"key":"ref3","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref4","first-page":"1","article-title":"ModernTCN: A modern pure convolution structure for general time series analysis","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Donghao"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403118"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref7","first-page":"1","article-title":"A time series is worth 64 words: Long-term forecasting with transformers","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Nie"},{"issue":"8","key":"ref8","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2023.3342137"},{"key":"ref10","first-page":"19622","article-title":"Large language models are zero-shot time series forecasters","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"36","author":"Gruver"},{"key":"ref11","article-title":"Large language models are few-shot health learners","volume-title":"arXiv:2305.15525","author":"Liu","year":"2023"},{"key":"ref12","first-page":"43322","article-title":"One fits all: Power general time series analysis by pretrained lm","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Zhou"},{"key":"ref13","first-page":"1","article-title":"TEST: Text prototype aligned embedding to activate LLM\u2019s ability for time series","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Sun"},{"key":"ref14","first-page":"1","article-title":"Graph-guided network for irregularly sampled multivariate time series","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref15","first-page":"49187","article-title":"Time series as images: Vision transformer for irregularly sampled time series","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210006"},{"key":"ref17","first-page":"1","article-title":"Efficiently modeling long sequences with structured state spaces","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Gu"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10618-020-00701-z"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i9.17018"},{"key":"ref20","first-page":"5816","article-title":"SciNet: Time series modeling and forecasting with sample convolution and interaction","volume-title":"Proc. 36th Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref21","first-page":"1","article-title":"MICN: Multi-scale local and global context modeling for long-term series forecasting","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Wang"},{"key":"ref22","first-page":"1","article-title":"TimesNet: Temporal 2D-variation modeling for general time series analysis","volume-title":"Proc. 11th Int. 
Conf. Learn. Represent.","author":"Wu"},{"key":"ref23","article-title":"Less is more: Fast multivariate time series forecasting with light sampling-oriented MLP structures","volume-title":"arXiv:2207.01186","author":"Zhang","year":"2022"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26317"},{"key":"ref25","first-page":"27268","article-title":"FedFormer: Frequency enhanced decomposed transformer for long-term series forecasting","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhou"},{"key":"ref26","first-page":"24226","article-title":"Flowformer: Linearizing transformers with conservation flows","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","volume":"162","author":"Wu"},{"key":"ref27","first-page":"1","article-title":"Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref28","first-page":"1","article-title":"Unsupervised scalable representation learning for multivariate time series","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Franceschi"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-018-24271-9"},{"key":"ref30","first-page":"4353","article-title":"Set functions for time series","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Horn"},{"key":"ref31","first-page":"1","article-title":"Multi-time attention networks for irregularly sampled time series","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Shukla"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i1.16145"},{"key":"ref33","first-page":"1","article-title":"Interpolation-prediction networks for irregularly sampled time series","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Shukla"},{"key":"ref34","first-page":"1","article-title":"TEMPO: Prompt-based generative pre-trained transformer for time series forecasting","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Cao"},{"key":"ref35","article-title":"Llm4ts: Aligning pre-trained LLMs as data-efficient time-series forecasters","volume-title":"arXiv:2308.08469","author":"Chang","year":"2024"},{"key":"ref36","first-page":"1","article-title":"Time-LLM: Time series forecasting by reprogramming large language models","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Jin"},{"key":"ref37","first-page":"11763","article-title":"LIFT: Language-interfaced fine-tuning for non-language machine learning tasks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Dinh"},{"key":"ref38","article-title":"The UEA multivariate time series classification archive, 2018","volume-title":"arXiv:1811.00075","author":"Bagnall","year":"2018"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.2012.13"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1097\/CCM.0000000000004145"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1161\/01.CIR.101.23.e215"},{"key":"ref42","volume-title":"GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model","author":"Wang","year":"2021"},{"key":"ref43","first-page":"1","article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Kingma"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10706904.pdf?arnumber=10706904","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:41:37Z","timestamp":1732668097000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10706904\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3475471","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}