{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T10:05:12Z","timestamp":1766484312175,"version":"3.45.0"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11229265","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Input Resolution Downsizing as a Compression Technique for Vision Deep Learning Systems"],"prefix":"10.1109","author":[{"given":"J\u00e9r\u00e9my","family":"Morlier","sequence":"first","affiliation":[{"name":"IMT Atlantique Lab-STICC, UMR CNRS 6285,Brest,France,F-29238"}]},{"given":"Mathieu","family":"L\u00e9onardon","sequence":"additional","affiliation":[{"name":"IMT Atlantique Lab-STICC, UMR CNRS 6285,Brest,France,F-29238"}]},{"given":"Vincent","family":"Gripon","sequence":"additional","affiliation":[{"name":"IMT Atlantique Lab-STICC, UMR CNRS 6285,Brest,France,F-29238"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Learning both weights and connections for efficient neural network","volume":"28","author":"Han","year":"2015","journal-title":"Advances in neural information processing systems"},{"article-title":"The lottery ticket hypothesis: Finding sparse, trainable neural networks","year":"2018","author":"Frankle","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00286"},{"key":"ref4","article-title":"Binarized neural networks","volume":"29","author":"Hubara","year":"2016","journal-title":"Advances in neural information processing systems"},{"article-title":"Distilling the knowledge in a neural network","year":"2015","author":"Hinton","key":"ref5"},{"key":"ref6","article-title":"Exploiting linear structure within convolutional networks for efficient evaluation","volume":"27","author":"Denton","year":"2014","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"article-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","year":"2021","author":"Dosovitskiy","key":"ref8"},{"article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","year":"2017","author":"Howard","key":"ref9"},{"article-title":"EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks","year":"2020","author":"Tan","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00098"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-86340-1_11"},{"article-title":"Fixing the train-test resolution 
discrepancy","year":"2022","author":"Touvron","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01172"},{"article-title":"Vitar: Vision transformer with any resolution","year":"2024","author":"Fan","key":"ref17"},{"key":"ref18","first-page":"16 079","article-title":"Cape: Encoding relative positions with continuous augmented positional embeddings","volume":"34","author":"Likhomanenko","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Dinov2: Learning robust visual features without supervision","year":"2023","author":"Oquab","key":"ref19"},{"article-title":"A white paper on neural network quantization","year":"2021","author":"Nagel","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2018.00286"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.155"},{"article-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding","year":"2015","author":"Han","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"ref25","first-page":"8714","article-title":"Searching the search space of vision transformer","volume":"34","author":"Chen","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00171"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00155"},{"article-title":"A survey on transformer compression","year":"2024","author":"Tang","key":"ref28"},{"article-title":"Thinresnet: A new baseline for structured convolutional networks pruning","year":"2023","author":"Tessier","key":"ref29"},{"article-title":"Rethinking the value of network pruning","year":"2018","author":"Liu","key":"ref30"},{"article-title":"A closer look at structured pruning for neural network compression","year":"2018","author":"Crowley","key":"ref31"},{"key":"ref32","first-page":"711","article-title":"Data movement is all you need: A case study on optimizing transformers","volume-title":"Proceedings of Machine Learning and Systems","volume":"3","author":"Ivanov"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21428\/bf6fb269.1f033948"},{"article-title":"Benchmarking tpu, gpu, and cpu platforms for deep learning","year":"2019","author":"Wang","key":"ref34"},{"article-title":"EfficientNetV2: Smaller Models and Faster Training","year":"2021","author":"Tan","key":"ref35"},{"article-title":"Efficient memory management for deep neural net inference","year":"2020","author":"Pisarchyk","key":"ref36"},{"article-title":"Scaling laws for neural language models","year":"2020","author":"Kaplan","key":"ref37"},{"article-title":"Training compute-optimal large language models","year":"2022","author":"Hoffmann","key":"ref38"},{"key":"ref39","first-page":"16 344","article-title":"Flashattention: Fast and memory-efficient exact attention with io-awareness","volume":"35","author":"Dao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"author":"Vryniotis","key":"ref40","article-title":"How to train state-of-the-art models using torchvision\u2019s latest primitives"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, 
Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11229265.pdf?arnumber=11229265","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:20:29Z","timestamp":1763191229000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11229265\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11229265","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}