{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T06:29:49Z","timestamp":1774420189049,"version":"3.50.1"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10889449","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:15:02Z","timestamp":1741799702000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["VITRO: Vocabulary Inversion for Time-series Representation Optimization"],"prefix":"10.1109","author":[{"given":"Filippos","family":"Bellos","sequence":"first","affiliation":[{"name":"University of Michigan,Ann Arbor,MI,USA"}]},{"given":"Nam H.","family":"Nguyen","sequence":"additional","affiliation":[{"name":"Capital One,McLean,VA,USA"}]},{"given":"Jason J.","family":"Corso","sequence":"additional","affiliation":[{"name":"University of Michigan,Ann Arbor,MI,USA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proceedings of the 36th International Conference on Neural Information Processing Systems","author":"Wei"},{"key":"ref2","first-page":"24","article-title":"Can large language models reason about goal-oriented tasks?","volume-title":"Proceedings of the First edition of the Work-shop on the Scaling Behavior of Large Language Models (SCALE-LLM 2024)","author":"Bellos"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3443141"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096002"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1029\/RG012i003p00447"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3511808.3557294"},{"key":"ref7","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"Advances in Neural Information Processing Systems 33, NeurIPS 2020, December 6-12, 2020"},{"key":"ref8","first-page":"22199","article-title":"Large language models are zero-shot reasoners","volume":"35","author":"Kojima","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref9","article-title":"Time-LLM: Time series forecasting by reprogramming large language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Jin"},{"key":"ref10","article-title":"TEST: Text prototype aligned embedding to activate LLM\u2019s ability for time series","volume-title":"The Twelfth International Conference on Learning Representations","author":"Sun"},{"key":"ref11","article-title":"One fits all: Power general time series analysis by pretrained LM","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems","author":"Zhou"},{"key":"ref12","article-title":"$s^2$IP-LLM: Semantic space informed prompt learning with LLM for time series forecasting","volume-title":"in Forty-first International Conference on Machine Learning","author":"Pan"},{"key":"ref13","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","volume-title":"The Eleventh International Conference on Learning Representations","author":"Gal"},{"key":"ref14","article-title":"Reversible instance normalization for accurate time-series forecasting against distribution shift","volume-title":"International Conference on Learning Representations","author":"Kim"},{"key":"ref15","article-title":"A time series is worth 64 words: Long-term forecasting with transformers","volume-title":"International Conference on Learning Representations","author":"Nie"},{"key":"ref16","article-title":"Multimodal few-shot learning with frozen language models","author":"Tsimpoukelli","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"8","key":"ref17","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref18","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref19","article-title":"Timesnet: Temporal 2d-variation modeling for general time series analysis","volume-title":"International Conference on Learning Representations","author":"Wu"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26317"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10889449.pdf?arnumber=10889449","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:26:54Z","timestamp":1774416414000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10889449\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10889449","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}