{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:43:19Z","timestamp":1763192599109,"version":"3.45.0"},"reference-count":43,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228139","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["GLFormer: A Lightweight Vision Transformer for Balancing Global and Local Information"],"prefix":"10.1109","author":[{"given":"Zezhou","family":"Wang","sequence":"first","affiliation":[{"name":"The Australian National University"}]},{"given":"Yi","family":"Wang","sequence":"additional","affiliation":[{"name":"Modale AI Sci-Tech"}]},{"given":"Wei","family":"Zhang","sequence":"additional","affiliation":[{"name":"Yarbo Inc"}]},{"given":"Yuping","family":"Yuan","sequence":"additional","affiliation":[{"name":"Radio, Film and Television Design and Research Institute Co., Ltd,Information and Network Institute"}]},{"given":"Suyang","family":"Chen","sequence":"additional","affiliation":[{"name":"The City College of the City University of New York"}]},{"given":"Guangzhen","family":"Yao","sequence":"additional","affiliation":[{"name":"Northeast Normal University"}]},{"given":"Chengze","family":"Du","sequence":"additional","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]},{"given":"Renda","family":"Han","sequence":"additional","affiliation":[{"name":"Hainan University"}]},{"given":"Bobin","family":"Xie","sequence":"additional","affiliation":[{"name":"Shanghai University of Electric Power"}]},{"given":"Sandong","family":"Zhu","sequence":"additional","affiliation":[{"name":"Northeast Normal University"}]},{"given":"Long","family":"Zhang","sequence":"additional","affiliation":[{"name":"Northeast Normal University"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01181"},{"key":"ref2","first-page":"24 101","article-title":"A fast post-training pruning framework for transformers","volume":"35","author":"Kwon","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IPCCC59868.2024.10850369"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.conll-1.5"},{"article-title":"Mobilevit: light-weight, general-purpose, and mobile-friendly vision transformer","year":"2021","author":"Mehta","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20083-0_18"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01169"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01183"},{"key":"ref13","first-page":"35 624","article-title":"A closer look at self-supervised lightweight vision transformers","volume-title":"International Conference on Machine Learning","author":"Wang"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"article-title":"Quadtree attention for vision transformers","year":"2022","author":"Tang","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01172"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_8"},{"article-title":"Moat: Alternating mobile convolution and attention brings strong vision models","volume-title":"The Eleventh International Conference on Learning Representations","author":"Yang","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"ref22","first-page":"10 347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"International conference on machine learning","author":"Touvron"},{"key":"ref23","first-page":"15 908","article-title":"Transformer in transformer","volume":"34","author":"Han","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00299"},{"key":"ref26","first-page":"20 014","article-title":"Xcit: Cross-covariance image transformers","volume":"34","author":"Ali","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref27","first-page":"15 475","article-title":"Rest: An efficient transformer for visual recognition","volume":"34","author":"Zhang","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00983"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00089"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00714"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19809-0_35"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref33","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","volume-title":"International conference on machine learning","author":"Tan"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01058"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3202765"},{"article-title":"Quadtree attention for vision transformers","year":"2022","author":"Tang","key":"ref36"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-023-0364-2"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20050-2_43"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00207"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01181"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228139.pdf?arnumber=11228139","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:39:45Z","timestamp":1763192385000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228139\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228139","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}