{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T11:25:31Z","timestamp":1764588331039,"version":"3.28.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1109\/cvprw56347.2022.00422","type":"proceedings-article","created":{"date-parts":[[2022,8,23]],"date-time":"2022-08-23T19:52:53Z","timestamp":1661284373000},"page":"3773-3780","source":"Crossref","is-referenced-by-count":16,"title":["Continual Learning with Transformers for Image Classification"],"prefix":"10.1109","author":[{"given":"Beyza","family":"Ermis","sequence":"first","affiliation":[{"name":"AWS,Berlin"}]},{"given":"Giovanni","family":"Zappella","sequence":"additional","affiliation":[{"name":"AWS,Berlin"}]},{"given":"Martin","family":"Wistuba","sequence":"additional","affiliation":[{"name":"AWS,Berlin"}]},{"given":"Aditya","family":"Rawal","sequence":"additional","affiliation":[{"name":"AWS,Santa Clara"}]},{"given":"Cedric","family":"Archambeau","sequence":"additional","affiliation":[{"name":"AWS,Berlin"}]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00046"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.39"},{"key":"ref30","article-title":"Attention is all you need","volume":"30","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093365"},{"article-title":"Lifelong learning with dynamically expandable networks","year":"2017","author":"yoon","key":"ref36"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1404"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00303"},{"article-title":"Continual learning via neural pruning","year":"2019","author":"golkar","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00092"},{"key":"ref12","first-page":"2790","article-title":"Parameter-efficient transfer learning for nlp","author":"houlsby","year":"2019","journal-title":"International Conference on Machine Learning"},{"article-title":"Frustratingly easy transferability estimation","year":"2021","author":"huang","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.218"},{"key":"ref15","article-title":"Compacting, picking and growing for unforgetting continual learning","volume":"32","author":"hung","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref16","doi-asserted-by":"crossref","first-page":"3521","DOI":"10.1073\/pnas.1611835114","article-title":"Overcoming catastrophic forgetting in neural networks","volume":"114","author":"kirkpatrick","year":"2017","journal-title":"Proceedings of the National Academy of Sciences"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"krizhevsky","key":"ref17"},{"article-title":"Technical report for iccv 2021 challenge sslad-track3b: Transformers are better continual learners","year":"2022","author":"li","key":"ref18"},{"key":"ref19","first-page":"3925","article-title":"Learn to grow: A continual structure learning framework for overcoming catastrophic forgetting","author":"li","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"article-title":"Episodic memory in lifelong language learning","year":"2019","author":"masson d\u2019autume","key":"ref4"},{"article-title":"Experience replay for continual learning","year":"2018","author":"rolnick","key":"ref27"},{"journal-title":"Continual learning with tiny episodic memories","year":"2019","author":"chaudhry","key":"ref3"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2020","author":"dosovitskiy","key":"ref6"},{"key":"ref29","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","author":"touvron","year":"2021","journal-title":"International Conference on Machine Learning"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"article-title":"Dytox: Transformers for continual learning with dynamic token expansion","year":"2021","author":"douillard","key":"ref8"},{"key":"ref7","first-page":"86","article-title":"Podnet: Pooled outputs distillation for small-tasks incremental learning","author":"douillard","year":"2020","journal-title":"European Conference on Computer Vision"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"article-title":"Memory efficient continual learning for neural text classification","year":"2022","author":"ermis","key":"ref9"},{"article-title":"Selfless sequential learning","year":"2018","author":"aljundi","key":"ref1"},{"key":"ref20","first-page":"6467","article-title":"Gradient episodic memory for continual learning","volume":"30","author":"lopez-paz","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref22","first-page":"7294","article-title":"Leep: A new measure to evaluate transferability of learned representations","author":"nguyen","year":"2020","journal-title":"International Conference on Machine Learning"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"109","DOI":"10.1016\/S0079-7421(08)60536-8","article-title":"Catastrophic interference in connectionist networks: The sequential learning problem","volume":"24","author":"mccloskey","year":"1989","journal-title":"Psychology of Learning and Motivation"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.39"},{"article-title":"How do vision transformers work?","year":"2022","author":"park","key":"ref23"},{"key":"ref26","first-page":"2001","article-title":"icarl: Incremental classifier and representation learning","author":"rebuffi","year":"2017","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"key":"ref25","first-page":"8119","article-title":"Efficient parametrization of multi-domain deep neural net-works","author":"rebuffi","year":"2018","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"}],"event":{"name":"2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)","start":{"date-parts":[[2022,6,19]]},"location":"New Orleans, LA, USA","end":{"date-parts":[[2022,6,20]]}},"container-title":["2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9856930\/9856648\/09857227.pdf?arnumber=9857227","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,12]],"date-time":"2022-09-12T20:07:53Z","timestamp":1663013273000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9857227\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/cvprw56347.2022.00422","relation":{},"subject":[],"published":{"date-parts":[[2022,6]]}}}