{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,13]],"date-time":"2025-09-13T16:14:59Z","timestamp":1757780099367,"version":"3.37.3"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Special Program","award":["2022YFE0112400"],"award-info":[{"award-number":["2022YFE0112400"]}]},{"name":"Jiangsu Postdoctoral Research Funding Program"},{"name":"Natural Science","award":["1601009 A"],"award-info":[{"award-number":["1601009 A"]}]},{"name":"China Postdoctoral Science Fund batch 62"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["21706096"],"award-info":[{"award-number":["21706096"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004608","name":"Natural Science Foundation of Jiangsu Province","doi-asserted-by":"publisher","award":["BK20160162"],"award-info":[{"award-number":["BK20160162"]}],"id":[{"id":"10.13039\/501100004608","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["JUSRP123035"],"award-info":[{"award-number":["JUSRP123035"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Signal Process. Lett."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/lsp.2024.3374057","type":"journal-article","created":{"date-parts":[[2024,3,6]],"date-time":"2024-03-06T19:08:11Z","timestamp":1709752091000},"page":"865-869","source":"Crossref","is-referenced-by-count":5,"title":["Lightweight Deep Neural Network Model With Padding-Free Downsampling"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6193-6641","authenticated-orcid":false,"given":"Dengfeng","family":"Liu","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-3415-4645","authenticated-orcid":false,"given":"Xiaohe","family":"Guo","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3546-0966","authenticated-orcid":false,"given":"Ning","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8087-3001","authenticated-orcid":false,"given":"Qin","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence and Computer Science, Jiangnan University, Wuxi, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1","article-title":"Deep compression: Compressing deep neural network with pruning, trained quantization and Huffman coding","volume-title":"Proc. 4th Int. Conf. Learn. Representations","author":"Han","year":"2016"},{"key":"ref2","first-page":"1135","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Han","year":"2015"},{"key":"ref3","first-page":"1","article-title":"Pruning filters for efficient convnets","volume-title":"Proc. 5th Int. Conf. Learn. Representations","author":"Li","year":"2017"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00339"},{"key":"ref5","first-page":"5151","article-title":"Scalable methods for 8-bit training of neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Banner","year":"2018"},{"key":"ref6","first-page":"1","article-title":"Training binary neural networks with real-to-binary convolutions","volume-title":"Proc. 8th Int. Conf. Learn. Representations","author":"Martnez","year":"2020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00489"},{"article-title":"Distilling the knowledge in a neural network","year":"2015","author":"Hinton","key":"ref8"},{"key":"ref9","first-page":"1","article-title":"Fitnets: Hints for thin deep nets","volume-title":"Proc. 3rd Int. Conf. Learn. Representations","author":"Romero","year":"2015"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.754"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00716"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00165"},{"key":"ref14","first-page":"9969","article-title":"GhostNetv2: Enhance cheap operation with long-range attention","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Tang","year":"2022"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19809-0_35"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00293"},{"article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","year":"2017","author":"Howard","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01461"},{"key":"ref21","first-page":"12934","article-title":"Efficientformer: Vision transformers at mobilenet speed","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Li","year":"2022"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01549"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20083-0_18"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25082-8_1"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00520"},{"article-title":"Separable self-attention for mobile vision transformers","year":"2022","author":"Mehta","key":"ref26"},{"key":"ref27","first-page":"1","article-title":"Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer","volume-title":"Proc. 10th Int. Conf. Learn. Representations Virtual Event","author":"Mehta","year":"2022"},{"article-title":"Mobilevitv3: Mobile-friendly vision transformer with simple and effective fusion of local, global and input features","year":"2022","author":"Wadekar","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3570955"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3341437"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3102504"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIM.2023.3300421"},{"article-title":"Efficient large-scale vision representation learning","year":"2023","author":"Dolev","key":"ref33"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"Krizhevsky","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref36","first-page":"554","article-title":"Novel dataset for fine-grained image categorization: Stanford dogs","volume-title":"Proc. CVPR Workshop Fine-Grained Vis. Categorization","volume":"2","author":"Khosla","year":"2011"},{"key":"ref37","first-page":"1967","article-title":"Pelee: A real-time object detection system on mobile devices","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Wang","year":"2018"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11021-5_19"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.66"}],"container-title":["IEEE Signal Processing Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/97\/10380231\/10461068.pdf?arnumber=10461068","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,28]],"date-time":"2024-03-28T20:46:27Z","timestamp":1711658787000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10461068\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/lsp.2024.3374057","relation":{},"ISSN":["1070-9908","1558-2361"],"issn-type":[{"type":"print","value":"1070-9908"},{"type":"electronic","value":"1558-2361"}],"subject":[],"published":{"date-parts":[[2024]]}}}