{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T03:10:54Z","timestamp":1773371454748,"version":"3.50.1"},"reference-count":49,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,30]]},"DOI":"10.1109\/ijcnn60899.2024.10651059","type":"proceedings-article","created":{"date-parts":[[2024,9,9]],"date-time":"2024-09-09T17:35:05Z","timestamp":1725903305000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["SPGNet: A Serial-Parallel Gated Convolutional Network for Image Classification on Small Datasets"],"prefix":"10.1109","author":[{"given":"Yun","family":"Song","sequence":"first","affiliation":[{"name":"Changsha University of Science &amp; Technology,Changsha,China"}]},{"given":"Jinxuan","family":"Wang","sequence":"additional","affiliation":[{"name":"Changsha University of Science &amp; Technology,Changsha,China"}]},{"given":"Miaohui","family":"Wang","sequence":"additional","affiliation":[{"name":"Shenzhen University,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Self-consuming generative models go mad","author":"Alemohammad","year":"2023"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00518"},{"key":"ref3","article-title":"When vision transformers outperform resnets without pre-training or strong data augmentations","volume-title":"International Conference on Learning Representation (ICLR)","author":"Chen"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"ref9","first-page":"3965","article-title":"Coatnet: Marrying convolution and attention for all data sizes","volume":"34","author":"Dai","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref10","first-page":"2286","article-title":"Convit: Improving vision transformers with soft convolutional inductive biases","volume-title":"PMLR International Conference on Machine Learning","author":"d\u2019Ascoli"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01166"},{"key":"ref13","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-023-0364-2"},{"issue":"3","key":"ref15","article-title":"Demystifying local vision transformer: Sparse connectivity, 
weight sharing, and dynamic weight","volume":"2","author":"Han","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref18","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"issue":"7","key":"ref19","first-page":"3","article-title":"Tiny imagenet visual recognition challenge","volume":"7","author":"Le","year":"2015","journal-title":"CS 231N"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1093\/mnras\/sty3217"},{"key":"ref21","article-title":"Harnessing hard mixed samples with decoupled regularizer","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems","author":"Liu"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref24","article-title":"Decoupled weight decay regularization","volume-title":"International Conference on Learning Representation (ICLR)","author":"Loshchilov"},{"key":"ref25","article-title":"Fine-grained visual classification of aircraft","author":"Maji","year":"2013"},{"key":"ref26","article-title":"Mobilevit: light-weight, general-purpose, and mobile-friendly vision transformer","author":"Mehta","year":"2021"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00086"},{"key":"ref30","first-page":"10353","article-title":"Hornet: Efficient high-order spatial interactions with recursive gated convolutions","volume":"35","author":"Rao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref31","article-title":"When does bias transfer in transfer learning?","author":"Salman","year":"2022"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.97"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref36","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","volume-title":"PMLR International conference on machine learning","author":"Tan"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.3390\/e24091243"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2018.8545476"},{"key":"ref41","first-page":"4203","article-title":"Focal modulation networks","volume":"35","author":"Yang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3494981"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00612"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7687-1_79"},{"key":"ref46","article-title":"Dino: Detr with improved denoising anchor boxes for end-to-end object 
detection","author":"Zhang","year":"2022"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00716"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3201602"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7000"}],"event":{"name":"2024 International Joint Conference on Neural Networks (IJCNN)","location":"Yokohama, Japan","start":{"date-parts":[[2024,6,30]]},"end":{"date-parts":[[2024,7,5]]}},"container-title":["2024 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10649807\/10649898\/10651059.pdf?arnumber=10651059","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T06:55:02Z","timestamp":1725951302000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10651059\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,30]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/ijcnn60899.2024.10651059","relation":{},"subject":[],"published":{"date-parts":[[2024,6,30]]}}}