{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,26]],"date-time":"2025-06-26T04:53:16Z","timestamp":1750913596022},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,19]],"date-time":"2024-05-19T00:00:00Z","timestamp":1716076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,19]],"date-time":"2024-05-19T00:00:00Z","timestamp":1716076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000781","name":"European Research Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000781","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,19]]},"DOI":"10.1109\/iscas58744.2024.10558587","type":"proceedings-article","created":{"date-parts":[[2024,7,2]],"date-time":"2024-07-02T17:22:52Z","timestamp":1719940972000},"page":"1-5","source":"Crossref","is-referenced-by-count":3,"title":["Enabling Efficient Hardware Acceleration of Hybrid Vision Transformer (ViT) Networks at the Edge"],"prefix":"10.1109","author":[{"given":"Joren","family":"Dumoulin","sequence":"first","affiliation":[{"name":"KU Leuven,MICAS, ESAT"}]},{"given":"Pouya","family":"Houshmand","sequence":"additional","affiliation":[{"name":"KU Leuven,MICAS, ESAT"}]},{"given":"Vikram","family":"Jain","sequence":"additional","affiliation":[{"name":"KU Leuven,MICAS, ESAT"}]},{"given":"Marian","family":"Verhelst","sequence":"additional","affiliation":[{"name":"KU Leuven,MICAS, ESAT"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2020","author":"Dosovitskiy","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.90"},{"article-title":"Very deep convolutional networks for large-scale image recognition","year":"2014","author":"Simonyan","key":"ref4"},{"article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","year":"2017","author":"Howard","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2019.00293"},{"article-title":"Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer","year":"2021","author":"Mehta","key":"ref8"},{"article-title":"Training data-efficient image transformers and distillation through attention","year":"2020","author":"Touvron","key":"ref9"},{"article-title":"Separable self-attention for mobile vision transformers","year":"2022","author":"Mehta","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25082-8_1"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC.2014.6757323"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/1816038.1815968"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC.2017.7870353"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/JSSC.2022.3214064"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JSSC.2023.3236566"},{"article-title":"Vaqf: Fully automatic software-hardware co-design framework for low-bit vision transformer","year":"2022","author":"Sun","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/AICAS54282.2022.9869928"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/hpca53966.2022.00041"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/isscc42613.2021.9365943"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC42614.2022.9731773"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/jssc.2022.3214170"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC42615.2023.10067588"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/JSSC.2018.2865489"},{"article-title":"Zigzag: A memory-centric rapid dnn accelerator design space exploration framework","year":"2020","author":"Mei","key":"ref25"}],"event":{"name":"2024 IEEE International Symposium on Circuits and Systems (ISCAS)","start":{"date-parts":[[2024,5,19]]},"location":"Singapore, Singapore","end":{"date-parts":[[2024,5,22]]}},"container-title":["2024 IEEE International Symposium on Circuits and Systems (ISCAS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10557746\/10557828\/10558587.pdf?arnumber=10558587","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,3]],"date-time":"2024-07-03T06:57:22Z","timestamp":1719989842000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10558587\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,19]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/iscas58744.2024.10558587","relation":{},"subject":[],"published":{"date-parts":[[2024,5,19]]}}}