{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T14:11:26Z","timestamp":1765807886367,"version":"3.37.3"},"reference-count":59,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2021,8,1]],"date-time":"2021-08-01T00:00:00Z","timestamp":1627776000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,8,1]],"date-time":"2021-08-01T00:00:00Z","timestamp":1627776000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,1]],"date-time":"2021-08-01T00:00:00Z","timestamp":1627776000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Director of Computer Application Research Institute Foundation","award":["SJ2020A08"],"award-info":[{"award-number":["SJ2020A08"]}]},{"DOI":"10.13039\/501100002851","name":"China Academy of Engineering Physics Innovation and Development Fund Cultivation Project","doi-asserted-by":"publisher","award":["PY20210160"],"award-info":[{"award-number":["PY20210160"]}],"id":[{"id":"10.13039\/501100002851","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2021,8]]},"DOI":"10.1109\/tits.2021.3056426","type":"journal-article","created":{"date-parts":[[2021,2,13]],"date-time":"2021-02-13T02:14:02Z","timestamp":1613182442000},"page":"5261-5274","source":"Crossref","is-referenced-by-count":9,"title":["Compiler-Based Efficient CNN Model Construction for 5G Edge Devices"],"prefix":"10.1109","volume":"22","author":[{"given":"Kun","family":"Wan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8510-4025","authenticated-orcid":false,"given":"Xiaolei","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1215-9882","authenticated-orcid":false,"given":"Jianyu","family":"Yu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9886-1412","authenticated-orcid":false,"given":"Xiaosong","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4235-9671","authenticated-orcid":false,"given":"Xiaojiang","family":"Du","sequence":"additional","affiliation":[]},{"given":"Nadra","family":"Guizani","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"442","article-title":"Tensorizing neural networks","author":"novikov","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854828"},{"key":"ref33","first-page":"1269","article-title":"Exploiting linear structure within convolutional networks for efficient evaluation","author":"denton","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref32","article-title":"Training and inference with integers in deep neural networks","author":"wu","year":"2018","journal-title":"arXiv 1802 04680"},{"key":"ref31","article-title":"Alternating multi-bit quantization for recurrent neural networks","author":"xu","year":"2018","journal-title":"arXiv 1802 
00150"},{"key":"ref30","first-page":"1509","article-title":"Terngrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref37","article-title":"Design of efficient convolutional layers using single intra-channel convolution, topological subdivisioning and spatial &#x2018;Bottleneck&#x2019; structure","author":"wang","year":"2016","journal-title":"arXiv 1608 04337"},{"key":"ref36","article-title":"Flattened convolutional neural networks for feedforward acceleration","author":"jin","year":"2014","journal-title":"arXiv 1412 5474"},{"key":"ref35","article-title":"Speeding-up convolutional neural networks using fine-tuned CP-decomposition","author":"lebedev","year":"2014","journal-title":"arXiv 1412 6553"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.5244\/C.28.88"},{"key":"ref28","article-title":"Mixed precision training","author":"micikevicius","year":"2017","journal-title":"arXiv 1710 03740"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2018.01.010"},{"key":"ref29","article-title":"Extremely low bit neural network: Squeeze the last bit out with ADMM","author":"leng","year":"2017","journal-title":"arXiv 1707 09870"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref20","first-page":"963","article-title":"Expectation backpropagation: Parameter-free training of multilayer neural networks with continuous or discrete weights","author":"soudry","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.521"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_32"},{"key":"ref24","article-title":"Incremental network quantization: Towards lossless CNNs with low-precision weights","author":"zhou","year":"2017","journal-title":"arXiv 1702 03044"},{"key":"ref23","article-title":"DoReFa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"zhou","year":"2016","journal-title":"arXiv 1606 06160 [cs]"},{"key":"ref26","article-title":"Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or ?1","author":"courbariaux","year":"2016","journal-title":"arXiv 1602 02830 [cs]"},{"key":"ref25","first-page":"3123","article-title":"Binaryconnect: Training deep neural networks with binary weights during propagations","author":"courbariaux","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CNSM.2016.7818445"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICCNC.2018.8390280"},{"key":"ref59","article-title":"ProxylessNAS: Direct neural architecture search on target task and hardware","author":"cai","year":"2018","journal-title":"arXiv 1812 00332"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref57","article-title":"Densely connected convolutional networks","author":"huang","year":"2016","journal-title":"arXiv 1608 06993"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.123"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref53","article-title":"Batch normalization: Accelerating deep network training by 
reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"arXiv 1502 03167"},{"key":"ref52","first-page":"630","article-title":"Identity mappings in deep residual networks","author":"he","year":"2016","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.298"},{"key":"ref11","article-title":"Pruning filters for efficient ConvNets","author":"li","year":"2016","journal-title":"arXiv 1608 08710"},{"key":"ref40","article-title":"Ultimate tensorization: Compressing convolutional and FC layers alike","author":"garipov","year":"2016","journal-title":"arXiv 1611 03214"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.155"},{"key":"ref13","first-page":"806","article-title":"Sparse convolutional neural networks","author":"liu","year":"2015","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.541"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.78"},{"key":"ref16","article-title":"Pruning convolutional neural networks for resource efficient inference","author":"molchanov","year":"2016","journal-title":"arXiv 1611 06440"},{"key":"ref17","article-title":"MeProp: Sparsified back propagation for accelerated deep learning with reduced overfitting","author":"sun","year":"2017","journal-title":"arXiv 1706 06197"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3097983.3098035"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS.2017.8050797"},{"key":"ref4","article-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and Huffman coding","author":"han","year":"2015","journal-title":"arXiv 1510 00149 [cs]"},{"key":"ref3","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv 1409 1556"},{"key":"ref6","first-page":"598","article-title":"Optimal brain damage","author":"lecun","year":"1990","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.2018.1700291"},{"key":"ref8","first-page":"2074","article-title":"Learning structured sparsity in deep neural networks","author":"wen","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref7","first-page":"1135","article-title":"Learning both weights and connections for efficient neural network","author":"han","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref49","article-title":"ShuffleNet: An extremely efficient convolutional neural network for mobile devices","author":"zhang","year":"2017","journal-title":"arXiv 1707 01083"},{"key":"ref9","article-title":"Sparsely-connected neural networks: Towards efficient VLSI implementation of deep neural networks","author":"ardakani","year":"2016","journal-title":"arXiv 1611 01427"},{"key":"ref46","article-title":"Rigid-motion scattering for texture classification","author":"sifre","year":"2014","journal-title":"arXiv 1403 1687 [cs]"},{"key":"ref45","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"howard","year":"2017","journal-title":"arXiv 1704 04861"},{"key":"ref48","article-title":"Xception: Deep learning with depthwise separable convolutions","author":"chollet","year":"2016","journal-title":"arXiv 1610 02357"},{"key":"ref47","article-title":"Network in 
network","author":"lin","year":"2013","journal-title":"arXiv 1312 4400"},{"key":"ref42","article-title":"Tensor-train recurrent neural networks for video classification","author":"yang","year":"2017","journal-title":"arXiv 1707 01786"},{"key":"ref41","article-title":"Compression of deep convolutional neural networks for fast and low power mobile applications","author":"kim","year":"2015","journal-title":"arXiv 1511 06530"},{"key":"ref44","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref43","first-page":"856","article-title":"Compression-aware training of deep networks","author":"alvarez","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6979\/9509372\/09352525.pdf?arnumber=9352525","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:52:07Z","timestamp":1652194327000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9352525\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8]]},"references-count":59,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tits.2021.3056426","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"type":"print","value":"1524-9050"},{"type":"electronic","value":"1558-0016"}],"subject":[],"published":{"date-parts":[[2021,8]]}}}