{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T15:14:05Z","timestamp":1759331645880,"version":"build-2065373602"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,7,26]],"date-time":"2025-07-26T00:00:00Z","timestamp":1753488000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,26]],"date-time":"2025-07-26T00:00:00Z","timestamp":1753488000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,7,26]]},"DOI":"10.23919\/mva65244.2025.11175120","type":"proceedings-article","created":{"date-parts":[[2025,9,26]],"date-time":"2025-09-26T17:35:13Z","timestamp":1758908113000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Bidirectional Action Sequence Learning for Long-term Action Anticipation with Large Language Models"],"prefix":"10.23919","author":[{"given":"Yuji","family":"Sato","sequence":"first","affiliation":[{"name":"Panasonic Connect Co., Ltd."}]},{"given":"Yasunori","family":"Ishii","sequence":"additional","affiliation":[{"name":"Panasonic Holdings Corporation"}]},{"given":"Takayoshi","family":"Yamashita","sequence":"additional","affiliation":[{"name":"Chubu University"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19778-9_38"},{"article-title":"VideoLLM: Modeling video sequence with large language models","year":"2023","author":"Chen","key":"ref2"},{"article-title":"ELECTRA: Pre-training text encoders as discriminators rather than generators","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Clark","key":"ref3"},{"article-title":"Video + clip baseline for ego4d long-term action anticipation","year":"2022","author":"Das","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1423"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00306"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01842"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01834"},{"article-title":"DeBERTa: Decoding-enhanced bert with disen-tangled attention","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"He","key":"ref11"},{"article-title":"PALM: Predicting actions through language models @ ego4d long-term action anticipation challenge 2023","year":"2023","author":"Huang","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02084"},{"article-title":"Technical report for ego4d long term action anticipation challenge 2023","year":"2023","author":"Ishibashi","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01396"},{"article-title":"ALBERT: A lite bert for self-supervised learning of language representations","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Lan","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01374"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01644"},{"article-title":"A robustly optimized bert pretraining approach","year":"2019","author":"Liu","key":"ref19"},{"key":"ref20","first-page":"46212","article-title":"EgoSchema: A diagnostic benchmark for very long-form video language understanding","volume-title":"Proceedings of the Advances in Neural Information Processing Systems (NeurIPS)","volume":"36","author":"Mangalam"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19830-4_32"},{"key":"ref22","first-page":"5079","article-title":"Meet in the middle: A new pre-training paradigm","volume-title":"Proceedings of the Advances in Neural Information Processing Systems (NeurIPS)","volume":"36","author":"Nguyen"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01249"},{"article-title":"DistilBERT, a distilled version of bert: smaller, faster, cheaper and lighter","year":"2019","author":"Sanh","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00989"},{"key":"ref26","first-page":"5926","article-title":"MASS: Masked sequence to sequence pre-training for language generation","volume-title":"Proceedings of the International Conference on Machine Learning (ICML)","author":"Song"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00661"},{"article-title":"AntGPT: Can large language models help long-term action anticipation from videos?","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Zhao","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00637"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01840"}],"event":{"name":"2025 19th International Conference on Machine Vision and Applications (MVA)","start":{"date-parts":[[2025,7,26]]},"location":"Kyoto, Japan","end":{"date-parts":[[2025,7,28]]}},"container-title":["2025 19th International Conference on Machine Vision and Applications (MVA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11174131\/11175049\/11175120.pdf?arnumber=11175120","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,30]],"date-time":"2025-09-30T13:33:55Z","timestamp":1759239235000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11175120\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,26]]},"references-count":30,"URL":"https:\/\/doi.org\/10.23919\/mva65244.2025.11175120","relation":{},"subject":[],"published":{"date-parts":[[2025,7,26]]}}}