{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,28]],"date-time":"2026-04-28T15:34:15Z","timestamp":1777390455070,"version":"3.51.4"},"reference-count":33,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62375081"],"award-info":[{"award-number":["62375081"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3558956","type":"journal-article","created":{"date-parts":[[2025,4,8]],"date-time":"2025-04-08T18:02:17Z","timestamp":1744135337000},"page":"64727-64736","source":"Crossref","is-referenced-by-count":1,"title":["Convolution Smooth: A Post-Training Quantization Method for Convolutional Neural Networks"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-9813-5603","authenticated-orcid":false,"given":"Yongyuan","family":"Chen","sequence":"first","affiliation":[{"name":"Department of Electronic Science and Technology, School of Physics and Electronics, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-8504-9649","authenticated-orcid":false,"given":"Zhendao","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Electronic Science and Technology, School of Physics and Electronics, Hunan University, Changsha, 
China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2017.574"},{"key":"ref2","first-page":"1","article-title":"Relaxed quantization for discretized neural networks","volume-title":"Proc. 7th Int. Conf. Learn. Represent. (ICLR)","author":"Louizos"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2021.3088904"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/AICAS59952.2024.10595895"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106910"},{"key":"ref6","first-page":"1","article-title":"Fast convolutional nets with FBFFT: A GPU performance evaluation","volume-title":"Proc. 3rd Int. Conf. Learn. Represent. (ICLR)","author":"Vasilache"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00881"},{"key":"ref8","first-page":"3123","article-title":"BinaryConnect: Training deep neural networks with binary weights during propagation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Courbariaux"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_32"},{"key":"ref10","article-title":"DoReFa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"Zhou","year":"2016","journal-title":"arXiv:1606.06160"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00826"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01227-8"},{"key":"ref13","first-page":"1","article-title":"Trained ternary quantization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhu"},{"key":"ref14","article-title":"Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or -1","author":"Courbariaux","year":"2016","journal-title":"arXiv:1602.02830"},{"key":"ref15","first-page":"1","article-title":"WRPN: Wide reduced-precision networks","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Mishra"},{"key":"ref16","article-title":"Incremental network quantization: Towards lossless CNNs with low-precision weights","author":"Zhou","year":"2017","journal-title":"arXiv:1702.03044"},{"key":"ref17","article-title":"PACT: Parameterized clipping activation for quantized neural networks","author":"Choi","year":"2018","journal-title":"arXiv:1805.06085"},{"key":"ref18","first-page":"1","article-title":"Learned step size quantization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Esser"},{"key":"ref19","volume-title":"Nvidia TensorRT","year":"2025"},{"key":"ref20","article-title":"Quantizing deep convolutional networks for efficient inference: A whitepaper","author":"Krishnamoorthi","year":"2018","journal-title":"arXiv:1806.08342"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00363"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00141"},{"key":"ref23","first-page":"7197","article-title":"Up or down? Adaptive rounding for post-training quantization","volume-title":"Proc. Mach. Learn. Res.","author":"Nagel"},{"key":"ref24","article-title":"EasyQuant: Post-training quantization via scale optimization","author":"Wu","year":"2020","journal-title":"arXiv:2006.16669"},{"key":"ref25","article-title":"Fighting quantization bias with bias","author":"Finkelstein","year":"2019","journal-title":"arXiv:1906.03193"},{"key":"ref26","first-page":"4486","article-title":"Same, same but different: Recovering neural network quantization error through weight factorization","volume-title":"Proc. Mach. Learn. Res.","author":"Meller"},{"key":"ref27","first-page":"1","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","volume-title":"Proc. Int. Conf. Mach. Learn. 
(ICML)","author":"Ioffe"},{"key":"ref28","first-page":"38087","article-title":"SmoothQuant: Accurate and efficient post-training quantization for large language models","volume-title":"Proc. Mach. Learn. Res.","author":"Xiao"},{"key":"ref29","first-page":"18518","article-title":"HAWQ-v2: Hessian aware trace-weighted quantization of neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Dong"},{"key":"ref30","article-title":"BRECQ: Pushing the limit of post-training quantization by block reconstruction","author":"Li","year":"2021","journal-title":"arXiv:2102.05426"},{"key":"ref31","article-title":"EfQAT: An efficient framework for quantization-aware training","author":"Ashkboos","year":"2024","journal-title":"arXiv:2411.11038"},{"key":"ref32","first-page":"1","article-title":"QDrop: Randomly dropping quantization for extremely low-bit post-training quantization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wei"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2024\/474"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/10955493.pdf?arnumber=10955493","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,21]],"date-time":"2025-04-21T17:41:41Z","timestamp":1745257301000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10955493\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3558956","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}