{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T03:34:31Z","timestamp":1770089671034,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":20,"publisher":"ACM","funder":[{"name":"NSTC","award":["113-2221-E-027-050-MY3"],"award-info":[{"award-number":["113-2221-E-027-050-MY3"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,21]]},"DOI":"10.1145\/3787256.3787270","type":"proceedings-article","created":{"date-parts":[[2026,2,2]],"date-time":"2026-02-02T16:23:12Z","timestamp":1770049392000},"page":"96-100","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["A Highly Area-Efficient Transformer Accelerator for Edge Devices Based on Cross-Shaped Window Attention"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-3903-1410","authenticated-orcid":false,"given":"Yue-Hang","family":"Li","sequence":"first","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3855-1655","authenticated-orcid":false,"given":"Tzu-Lun","family":"Fang","sequence":"additional","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9599-6415","authenticated-orcid":false,"given":"Yu-Cheng","family":"Fan","sequence":"additional","affiliation":[{"name":"National Taipei University of Technology, Taipei, Taiwan,"}]}],"member":"320","published-online":{"date-parts":[[2026,2,2]]},"reference":[{"key":"e_1_3_3_1_1_2","volume-title":"An image is worth 16x16 words: Transformers for image recognition at scale,\" arXiv preprint arXiv:2010.11929","author":"Dosovitskiy","year":"2020","unstructured":"Dosovitskiy et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" arXiv preprint arXiv:2010.11929, 2020."},{"key":"e_1_3_3_1_2_2","first-page":"1","volume-title":"2024 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia)","author":"Liu M. Y.","year":"2024","unstructured":"M. Y. Liu et al., \"Matrix Accelerator Designed for Vision Transformer,\" 2024 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia), Danang, Vietnam, 2024, pp. 1-2."},{"key":"e_1_3_3_1_3_2","first-page":"1","volume-title":"Matsue","author":"Hsieh P. C.","year":"2025","unstructured":"P. C. Hsieh, Y. Z. Xu and Y. C. Fan, \"An Area-Efficient Accelerator for Convolution and Vision Transformer,\" 2025 1st International Conference on Consumer Technology (ICCT-Pacific), Matsue, Shimane, Japan, 2025, pp. 1-3."},{"key":"e_1_3_3_1_4_2","first-page":"10012","article-title":"Swin transformer: Hierarchical vision transformer using shifted windows","author":"Z. Liu","year":"2021","unstructured":"Z. Liu et al., \"Swin transformer: Hierarchical vision transformer using shifted windows,\" in Proceedings of the IEEE\/CVF international conference on computer vision, 2021, pp. 10012-10022.","journal-title":"Proceedings of the IEEE\/CVF international conference on computer vision"},{"key":"e_1_3_3_1_5_2","first-page":"12124","article-title":"Cswin transformer: A general vision transformer backbone with cross-shaped windows","author":"X. Dong","year":"2022","unstructured":"X. 
Dong et al., \"Cswin transformer: A general vision transformer backbone with cross-shaped windows,\" in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2022, pp. 12124-12134.","journal-title":"Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition"},{"key":"e_1_3_3_1_6_2","volume-title":"Attention is all you need,\" Advances in neural information processing systems","author":"A. Vaswani","year":"2017","unstructured":"A. Vaswani et al., \"Attention is all you need,\" Advances in neural information processing systems, vol. 30, 2017."},{"key":"e_1_3_3_1_7_2","first-page":"1","volume-title":"Matsue","author":"Fang T. L.","year":"2025","unstructured":"T. L. Fang, S. C. Chang and Y. C. Fan, \"Image Classification Based on Triplet Neighborhood Attention Transformer,\" 2025 1st International Conference on Consumer Technology (ICCT-Pacific), Matsue, Shimane, Japan, 2025, pp. 1-3."},{"key":"e_1_3_3_1_8_2","volume-title":"Spike-driven Transformer  V2: Meta Spiking Neural Network Architecture Inspiring the Design of Next-generation Neuromorphic Chips. arXiv preprint arXiv:2404.03663. Retrieved","author":"Yao Man","year":"2025","unstructured":"Man Yao, JiaKui Hu, Tianxiang Hu, Yifan Xu, Zhaokun Zhou, Yonghong Tian, Boxing Xu, and Guoqi Li. 2024. Spike-driven Transformer V2: Meta Spiking Neural Network Architecture Inspiring the Design of Next-generation Neuromorphic Chips. arXiv preprint arXiv:2404.03663. Retrieved September 6, 2025 from https:\/\/arxiv.org\/abs\/2404.03663"},{"key":"e_1_3_3_1_9_2","first-page":"469","volume-title":"Softermax: Hardware\/software co-design of an efficient softmax for transformers,\" in 2021 58th ACM\/IEEE Design Automation Conference (DAC)","author":"Stevens J. R.","year":"2021","unstructured":"J. R. Stevens, R. Venkatesan, S. Dai, B. Khailany, and A. Raghunathan, \"Softermax: Hardware\/software co-design of an efficient softmax for transformers,\" in 2021 58th ACM\/IEEE Design Automation Conference (DAC), 2021: IEEE, pp. 469-474."},{"key":"e_1_3_3_1_10_2","first-page":"84","volume-title":"Hardware accelerator for multi-head attention and position-wise feed-forward in the transformer,\" in 2020 IEEE 33rd International System-on-Chip Conference (SOCC)","author":"Lu S.","year":"2020","unstructured":"S. Lu, M. Wang, S. Liang, J. Lin, and Z. Wang, \"Hardware accelerator for multi-head attention and position-wise feed-forward in the transformer,\" in 2020 IEEE 33rd International System-on-Chip Conference (SOCC), 2020: IEEE, pp. 84-89."},{"key":"e_1_3_3_1_11_2","first-page":"248","volume-title":"Imagenet: A large-scale hierarchical image database,\" in 2009 IEEE conference on computer vision and pattern recognition","author":"Deng J.","year":"2009","unstructured":"J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei, \"Imagenet: A large-scale hierarchical image database,\" in 2009 IEEE conference on computer vision and pattern recognition, 2009: Ieee, pp. 248-255."},{"key":"e_1_3_3_1_12_2","doi-asserted-by":"publisher","DOI":"10.1504\/IJHPCN.2007.015767"},{"key":"e_1_3_3_1_13_2","doi-asserted-by":"publisher","DOI":"10.1109\/TMAG.2010.2102743"},{"key":"e_1_3_3_1_14_2","first-page":"9355","volume-title":"Twins: Revisiting the design of spatial attention in vision transformers,\" Advances in neural information processing systems","author":"X. Chu","year":"2021","unstructured":"X. 
Chu et al., \"Twins: Revisiting the design of spatial attention in vision transformers,\" Advances in neural information processing systems, vol. 34, pp. 9355-9366, 2021."},{"key":"e_1_3_3_1_15_2","first-page":"6185","article-title":"Neighborhood attention transformer","author":"Hassani A.","year":"2023","unstructured":"A. Hassani, S. Walton, J. Li, S. Li, and H. Shi, \"Neighborhood attention transformer,\" in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2023, pp. 6185-6194.","journal-title":"Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition"},{"key":"e_1_3_3_1_16_2","first-page":"10347","volume-title":"Training data-efficient image transformers & distillation through attention,\" in International conference on machine learning","author":"Touvron H.","year":"2021","unstructured":"H. Touvron, M. Cord, M. Douze, F. Massa, A. Sablayrolles, and H. J\u00e9gou, \"Training data-efficient image transformers & distillation through attention,\" in International conference on machine learning, 2021: PMLR, pp. 10347-10357."},{"key":"e_1_3_3_1_17_2","first-page":"1","volume-title":"65: IEEE","author":"Y. Wang","unstructured":"Y. Wang et al., \"A 28nm 27.5 TOPS\/W approximate-computing-based transformer processor with asymptotic sparsity speculating and out-of-order computing,\" in 2022 IEEE international solid-state circuits conference (ISSCC), 2022, vol. 65: IEEE, pp. 1-3."},{"key":"e_1_3_3_1_18_2","first-page":"363","article-title":"OPTIMUS: OPTImized matrix MUltiplication Structure for Transformer neural network accelerator","volume":"2","author":"Park J.","year":"2020","unstructured":"J. Park, H. Yoon, D. Ahn, J. Choi, and J.-J. Kim, \"OPTIMUS: OPTImized matrix MUltiplication Structure for Transformer neural network accelerator,\" Proceedings of Machine Learning and Systems, vol. 2, pp. 363-378, 2020.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_3_1_19_2","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysarc.2022.102520"},{"key":"e_1_3_3_1_20_2","first-page":"273","volume-title":"Vitcod: Vision transformer acceleration via dedicated algorithm and accelerator co-design,\" in 2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)","author":"H. You","year":"2023","unstructured":"H. You et al., \"Vitcod: Vision transformer acceleration via dedicated algorithm and accelerator co-design,\" in 2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA), 2023: IEEE, pp. 
273-286."}],"event":{"name":"CIIS 2025: 2025 The 8th International Conference on Computational Intelligence and Intelligent Systems","location":"Okayama , Japan","acronym":"CIIS 2025"},"container-title":["Proceedings of the 2025 8th International Conference on Computational Intelligence and Intelligent Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3787256.3787270","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,2]],"date-time":"2026-02-02T16:24:08Z","timestamp":1770049448000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3787256.3787270"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,21]]},"references-count":20,"alternative-id":["10.1145\/3787256.3787270","10.1145\/3787256"],"URL":"https:\/\/doi.org\/10.1145\/3787256.3787270","relation":{},"subject":[],"published":{"date-parts":[[2025,11,21]]},"assertion":[{"value":"2026-02-02","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}