{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T04:31:14Z","timestamp":1770093074430,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":22,"publisher":"ACM","funder":[{"name":"NSTC","award":["113-2221-E-027-050-MY3"],"award-info":[{"award-number":["113-2221-E-027-050-MY3"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,21]]},"DOI":"10.1145\/3787256.3787272","type":"proceedings-article","created":{"date-parts":[[2026,2,2]],"date-time":"2026-02-02T16:23:12Z","timestamp":1770049392000},"page":"107-112","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["A High-Performance ReLU-Based Hardware Accelerator with Simplified Quantization for Efficient Vision Transformers on Edge Devices"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-9676-0922","authenticated-orcid":false,"given":"Yu-Wen","family":"Peng","sequence":"first","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0913-1987","authenticated-orcid":false,"given":"Pin-Chieh","family":"Hsieh","sequence":"additional","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9599-6415","authenticated-orcid":false,"given":"Yu-Cheng","family":"Fan","sequence":"additional","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]}],"member":"320","published-online":{"date-parts":[[2026,2,2]]},"reference":[{"key":"e_1_3_3_1_1_2","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"e_1_3_3_1_2_2","first-page":"454","volume-title":"Kyoto","author":"Liu F. Y.","year":"2021","unstructured":"F. Y. Liu, C. L. Liao, P. W. Chou and Y. C. Fan, \"Objects Detection Deep Learning System Based on 2-D Winograd Convolutional Neural Network,\" 2021 IEEE 10th Global Conference on Consumer Electronics (GCCE), Kyoto, Japan, 2021, pp. 454-455."},{"key":"e_1_3_3_1_3_2","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"e_1_3_3_1_4_2","doi-asserted-by":"publisher","unstructured":"Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Image Recognition. 10.48550\/arXiv.1409.1556","DOI":"10.48550\/arXiv.1409.1556"},{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","unstructured":"Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2015. Deep Residual Learning for Image Recognition. 10.48550\/arXiv.1512.03385","DOI":"10.48550\/arXiv.1512.03385"},{"key":"e_1_3_3_1_6_2","doi-asserted-by":"publisher","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N. Gomez Lukasz Kaiser and Illia Polosukhin. 2023. Attention Is All You Need. 10.48550\/arXiv.1706.03762","DOI":"10.48550\/arXiv.1706.03762"},{"key":"e_1_3_3_1_7_2","doi-asserted-by":"publisher","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly Jakob Uszkoreit and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. 10.48550\/arXiv.2010.11929","DOI":"10.48550\/arXiv.2010.11929"},{"key":"e_1_3_3_1_8_2","first-page":"1","volume-title":"2024 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia)","author":"Liu M. Y.","year":"2024","unstructured":"M. Y. Liu et al., \"Matrix Accelerator Designed for Vision Transformer,\" 2024 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia), Danang, Vietnam, 2024, pp. 1-2."},{"key":"e_1_3_3_1_9_2","first-page":"1","volume-title":"Matsue","author":"Hsieh P. C.","year":"2025","unstructured":"P. C. Hsieh, Y. Z. Xu and Y. C. Fan, \"An Area-Efficient Accelerator for Convolution and Vision Transformer,\" 2025 1st International Conference on Consumer Technology (ICCT-Pacific), Matsue, Shimane, Japan, 2025, pp. 1-3."},{"key":"e_1_3_3_1_10_2","doi-asserted-by":"publisher","unstructured":"Xinyu Liu Houwen Peng Ningxin Zheng Yuqing Yang Han Hu and Yixuan Yuan. 2023. EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention. 10.48550\/arXiv.2305.07027","DOI":"10.48550\/arXiv.2305.07027"},{"key":"e_1_3_3_1_11_2","volume-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky A.","year":"2009","unstructured":"A. Krizhevsky and G. Hinton, \"Learning multiple layers of features from tiny images,\" 2009."},{"key":"e_1_3_3_1_12_2","first-page":"248","volume-title":"Imagenet: A large-scale hierarchical image database,\" in 2009 IEEE conference on computer vision and pattern recognition","author":"Deng J.","year":"2009","unstructured":"J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei, \"Imagenet: A large-scale hierarchical image database,\" in 2009 IEEE conference on computer vision and pattern recognition, 2009: Ieee, pp. 248-255."},{"key":"e_1_3_3_1_13_2","first-page":"1","volume-title":"Matsue","author":"Fang T. L.","year":"2025","unstructured":"T. L. Fang, S. C. Chang and Y. C. Fan, \"Image Classification Based on Triplet Neighborhood Attention Transformer,\" 2025 1st International Conference on Consumer Technology (ICCT-Pacific), Matsue, Shimane, Japan, 2025, pp. 1-3."},{"key":"e_1_3_3_1_14_2","doi-asserted-by":"publisher","DOI":"10.1109\/TMAG.2010.2102743"},{"key":"e_1_3_3_1_15_2","first-page":"28092","article-title":"Post-training quantization for vision transformer","volume":"34","author":"Liu Z.","year":"2021","unstructured":"Z. Liu, Y. Wang, K. Han, W. Zhang, S. Ma, and W. Gao, \"Post-training quantization for vision transformer,\" Advances in Neural Information Processing Systems, vol. 34, pp. 28092-28103, 2021.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_3_1_16_2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00286"},{"key":"e_1_3_3_1_17_2","volume-title":"Mobilevit: light-weight, general-purpose, and mobile friendly vision transformer,\" arXiv preprint arXiv:2110.02178","author":"Mehta S.","year":"2021","unstructured":"S. Mehta and M. Rastegari, \"Mobilevit: light-weight, general-purpose, and mobile friendly vision transformer,\" arXiv preprint arXiv:2110.02178, 2021."},{"key":"e_1_3_3_1_18_2","volume-title":"Fq-vit: Post-training quantization for fully quantized vision transformer,\" arXiv preprint arXiv:2111.13824","author":"Lin Y.","year":"2021","unstructured":"Y. Lin, T. Zhang, P. Sun, Z. Li, and S. Zhou, \"Fq-vit: Post-training quantization for fully quantized vision transformer,\" arXiv preprint arXiv:2111.13824, 2021."},{"key":"e_1_3_3_1_19_2","first-page":"17227","article-title":"Repq-vit: Scale reparameterization for post-training quantization of vision transformers","author":"Li Z.","year":"2023","unstructured":"Z. Li, J. Xiao, L. Yang, and Q. Gu, \"Repq-vit: Scale reparameterization for post-training quantization of vision transformers,\" in Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2023, pp. 17227-17236.","journal-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision"},{"key":"e_1_3_3_1_20_2","first-page":"191","volume-title":"Ptq4vit: Post-training quantization for vision transformers with twin uniform quantization,\" in European conference on computer vision","author":"Yuan Z.","year":"2022","unstructured":"Z. Yuan, C. Xue, Y. Chen, Q. Wu, and G. Sun, \"Ptq4vit: Post-training quantization for vision transformers with twin uniform quantization,\" in European conference on computer vision, 2022: Springer, pp. 191-207."},{"key":"e_1_3_3_1_21_2","doi-asserted-by":"crossref","unstructured":"Z. Li et al. \"Auto-vit-acc: An fpga-aware automatic acceleration framework for vision transformer with mixed-scheme quantization \" in 2022 32nd International Conference on Field-Programmable Logic and Applications (FPL) 2022: IEEE pp. 109-116.","DOI":"10.1109\/FPL57034.2022.00027"},{"key":"e_1_3_3_1_22_2","doi-asserted-by":"publisher","DOI":"10.1109\/TVLSI.2024.3525184"}],"event":{"name":"CIIS 2025: 2025 The 8th International Conference on Computational Intelligence and Intelligent Systems","location":"Okayama, Japan","acronym":"CIIS 2025"},"container-title":["Proceedings of the 2025 8th International Conference on Computational Intelligence and Intelligent Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3787256.3787272","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,2]],"date-time":"2026-02-02T16:23:49Z","timestamp":1770049429000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3787256.3787272"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,21]]},"references-count":22,"alternative-id":["10.1145\/3787256.3787272","10.1145\/3787256"],"URL":"https:\/\/doi.org\/10.1145\/3787256.3787272","relation":{},"subject":[],"published":{"date-parts":[[2025,11,21]]},"assertion":[{"value":"2026-02-02","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}