{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T08:45:32Z","timestamp":1773996332382,"version":"3.50.1"},"reference-count":157,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"King Abdullah University of Science and Technology CRG","award":["URF\/1\/4704-01-01"],"award-info":[{"award-number":["URF\/1\/4704-01-01"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tpami.2024.3394390","type":"journal-article","created":{"date-parts":[[2024,4,29]],"date-time":"2024-04-29T17:37:27Z","timestamp":1714412247000},"page":"7793-7812","source":"Crossref","is-referenced-by-count":17,"title":["A Review of State-of-the-art Mixed-Precision Neural Network Frameworks"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2514-7960","authenticated-orcid":false,"given":"Mariam","family":"Rakka","sequence":"first","affiliation":[{"name":"Center for Embedded &amp; Cyber-physical Systems, University of California - Irvine, Irvine, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7139-3428","authenticated-orcid":false,"given":"Mohammed E.","family":"Fouda","sequence":"additional","affiliation":[{"name":"Rain Neuromorphics Inc., San Francisco, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6634-6950","authenticated-orcid":false,"given":"Pramod","family":"Khargonekar","sequence":"additional","affiliation":[{"name":"Center for Embedded &amp; Cyber-physical Systems, University of California - Irvine, Irvine, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6982-365X","authenticated-orcid":false,"given":"Fadi","family":"Kurdahi","sequence":"additional","affiliation":[{"name":"Center for Embedded &amp; Cyber-physical Systems, University of California - Irvine, Irvine, CA, USA"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1","article-title":"Deep neural networks for object detection","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"26","author":"Szegedy","year":"2013"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3180155.3180220"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2020.2972000"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1186\/s13073-021-00835-9"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.2018.1700298"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01152"},{"key":"ref7","first-page":"1","article-title":"Pruning filters for efficient convnets","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li","year":"2016"},{"key":"ref8","first-page":"1","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Han","year":"2015"},{"key":"ref9","first-page":"598","article-title":"Optimal brain damage","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"2","author":"LeCun","year":"1989"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.643"},{"key":"ref11","article-title":"Exploring the regularity of sparse structure in convolutional neural networks","author":"Mao","year":"2017"},{"key":"ref12","article-title":"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and < 0.5 mb model size","author":"Iandola","year":"2016"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref14","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learning.","author":"Tan","year":"2019"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00215"},{"key":"ref16","article-title":"Efficient methods and hardware for deep learning","author":"Han","year":"2017"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01099"},{"key":"ref19","article-title":"Distilling the knowledge in a neural network","author":"Hinton","year":"2015"},{"key":"ref20","first-page":"1","article-title":"Apprentice: Using knowledge distillation techniques to improve low-precision network accuracy","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mishra","year":"2018"},{"key":"ref21","first-page":"1","article-title":"Model compression via distillation and quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Polino","year":"2018"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00874"},{"key":"ref23","first-page":"9","article-title":"Experimental determination of precision requirements for back-propagation training of artificial neural networks","volume-title":"Proc. Second Int. Conf. Microelectron. Neural Networks.","author":"Morgan","year":"1991"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00356"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-68238-5_7"},{"key":"ref26","first-page":"4114","article-title":"Binarized neural networks","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Hubara","year":"2016"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00286"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_32"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01237-3_23"},{"key":"ref30","first-page":"1","article-title":"Incremental network quantization: Towards lossless CNNs with low-precision weights","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhou","year":"2016"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA.2018.00069"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00038"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00514"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1117\/12.20700"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01318"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC.2014.6757323"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00242"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2020.107281"},{"key":"ref39","article-title":"Dorefa-Net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"Zhou","year":"2016"},{"key":"ref40","article-title":"Pact: Parameterized clipping activation for quantized neural networks","author":"Choi","year":"2018"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00363"},{"key":"ref42","first-page":"1","article-title":"Post-training 4-bit quantization of convolution networks for rapid-deployment","volume":"32","author":"Banner","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref43","article-title":"Quantizing deep convolutional networks for efficient inference: A whitepaper","author":"Krishnamoorthi","year":"2018"},{"key":"ref44","article-title":"Towards efficient training for neural network quantization","author":"Jin","year":"2019"},{"key":"ref45","article-title":"Ternary neural networks with fine-grained quantization","author":"Mellempudi","year":"2017"},{"key":"ref46","article-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding","author":"Han","year":"2015"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01225-0_36"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-020-02109-0"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2020.3009475"},{"key":"ref50","first-page":"1","article-title":"Learning and generalization in overparameterized neural networks, going beyond two layers","volume":"32","author":"Allen-Zhu","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref51","first-page":"1","article-title":"Binaryconnect: Training deep neural networks with binary weights during propagations","volume":"28","author":"Courbariaux","year":"2015","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref52","first-page":"1737","article-title":"Deep learning with limited numerical precision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Gupta","year":"2015"},{"key":"ref53","first-page":"1","article-title":"Training and inference with integers in deep neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wu","year":"2018"},{"key":"ref54","article-title":"Compressing deep convolutional networks using vector quantization","author":"Gong","year":"2014"},{"key":"ref55","first-page":"1","article-title":"Towards the limit of network quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Choi","year":"2016"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.521"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.761"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49357.2023.10094626"},{"key":"ref59","first-page":"1","article-title":"Loss-aware binarization of deep networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hou","year":"2016"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11713"},{"key":"ref61","first-page":"1","article-title":"Adaptive quantization of neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Khoram","year":"2018"},{"key":"ref62","first-page":"1","article-title":"Trained ternary quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhu","year":"2016"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/78.229903"},{"key":"ref64","first-page":"1","article-title":"Variational network quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Achterhold","year":"2018"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/SiPS.2014.6986082"},{"key":"ref66","article-title":"Rounding methods for neural networks with low resolution synaptic weights","author":"Muller","year":"2015"},{"key":"ref67","article-title":"Neural networks with few multiplications","author":"Lin","year":"2015"},{"key":"ref68","first-page":"1","article-title":"Expectation backpropagation: Parameter-free training of multilayer neural networks with continuous or discrete weights","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Soudry","year":"2014"},{"key":"ref69","first-page":"1","article-title":"Learning discrete weights using the local reparameterization trick","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Shayer","year":"2018"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007665907178"},{"key":"ref71","article-title":"Joint training of low-precision neural network with quantization interval parameters","author":"Jung","year":"2018"},{"key":"ref72","article-title":"Training compact neural networks with binary weights and low precision activations","author":"Zhuang"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-274"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-354"},{"key":"ref75","first-page":"1709","article-title":"QSGD: Communication-efficient SGD via gradient quantization and encoding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Alistarh","year":"2017"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/MLHPC.2016.004"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.574"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00982"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1201\/9781003162810-13"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207413"},{"key":"ref81","article-title":"A survey on methods and theories of quantized neural networks","author":"Guo","year":"2018"},{"key":"ref82","article-title":"Differentiable quantization of deep neural networks","author":"Uhlich","year":"2019"},{"key":"ref83","first-page":"1","volume-title":"Constrained Optimization and Lagrange Multiplier Methods","author":"Bertsekas","year":"2014"},{"key":"ref84","first-page":"5741","article-title":"Bayesian bits: Unifying quantization and pruning","volume":"33","author":"Baalen","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref85","first-page":"1","article-title":"Learning sparse neural networks through $L_0$ regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Louizos","year":"2018"},{"key":"ref86","article-title":"Auto-encoding variational bayes","author":"Kingma","year":"2013"},{"key":"ref87","first-page":"1278","article-title":"Stochastic backpropagation and approximate inference in deep generative models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rezende","year":"2014"},{"key":"ref88","article-title":"Estimating or propagating gradients through stochastic neurons for conditional computation","author":"Bengio","year":"2013"},{"key":"ref89","first-page":"1","article-title":"Relaxed quantization for discretized neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Louizos","year":"2018"},{"key":"ref90","first-page":"1","article-title":"Learned step size quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Esser","year":"2019"},{"key":"ref91","first-page":"7197","article-title":"Up or down? adaptive rounding for post-training quantization","volume-title":"Proc. Int. Conf. Mach. Learning.","author":"Nagel","year":"2020"},{"key":"ref92","first-page":"112","article-title":"Trained quantization thresholds for accurate and efficient fixed-point inference of deep neural networks","volume-title":"Proc. Mach. Learn. Syst.","volume":"2","author":"Jain","year":"2020"},{"key":"ref93","first-page":"1","article-title":"Mixed precision DNNs: All you need is a good parametrization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Uhlich","year":"2019"},{"key":"ref94","article-title":"Stochastic layer-wise precision in deep neural networks","author":"Lacey","year":"2018"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803498"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00363"},{"key":"ref97","first-page":"7543","article-title":"Improving neural network quantization without retraining using outlier channel splitting","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhao","year":"2019"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00141"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00292"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00852"},{"key":"ref101","first-page":"11875","article-title":"HAWQ-V3: Dyadic neural network quantization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yao","year":"2021"},{"key":"ref102","article-title":"Tvm: End-to-end optimization stack for deep learning","author":"Chen","year":"2018"},{"key":"ref103","first-page":"1","article-title":"PuLP: A linear programming toolkit for python","author":"Mitchell","year":"2011"},{"key":"ref104","article-title":"Nvidia. cutlass library","year":"2022"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref106","article-title":"Improving post training neural quantization: Layer-wise calibration and integer programming","author":"Hubara","year":"2020"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00881"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i9.16950"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_46"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2886192"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00225"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11623"},{"key":"ref113","article-title":"Mixed-precision quantized neural network with progressively decreasing bitwidth for image classification and object detection","author":"Chu","year":"2019"},{"key":"ref114","first-page":"18518","article-title":"Hawq-v2: Hessian aware trace-weighted quantization of neural networks","volume":"33","author":"Dong","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.1145\/1944345.1944349"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-019-0134-0"},{"key":"ref117","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00530"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1109\/ICCAD45719.2019.8942147"},{"key":"ref119","first-page":"1","article-title":"DARTS: Differentiable architecture search","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Liu","year":"2018"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref121","first-page":"1","article-title":"Categorical reparameterization with gumbel-softmax","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Jang","year":"2017"},{"key":"ref122","first-page":"1","article-title":"Very deep convolutional networks for large-scale image recognition","volume-title":"Proc. 3rd Int. Conf. Learn. Representations","author":"Simonyan","year":"2015"},{"key":"ref123","first-page":"1","article-title":"mixup: Beyond empirical risk minimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","year":"2018"},{"key":"ref124","first-page":"4095","article-title":"Efficient neural architecture search via parameters sharing","volume-title":"Proc. Int. Conf. Mach. Learning.","author":"Pham","year":"2018"},{"key":"ref125","first-page":"1","article-title":"Neural architecture optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Luo","year":"2018"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00215"},{"key":"ref127","article-title":"Design automation for efficient deep learning computing","author":"Han","year":"2019"},{"key":"ref128","first-page":"784","article-title":"AMC: AutoML for model compression and acceleration on mobile devices","volume-title":"Proc. Eur. Conf. Comput. Vis.","author":"He","year":"2018"},{"key":"ref129","first-page":"1","article-title":"Once-for-all: Train one network and specialize it for efficient deployment","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Cai","year":"2019"},{"key":"ref130","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58517-4_32"},{"key":"ref131","first-page":"1","article-title":"Proxylessnas: Direct neural architecture search on target task and hardware","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Cai","year":"2018"},{"key":"ref132","article-title":"Mixed precision quantization of convnets via differentiable neural architecture search","author":"Wu","year":"2018"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33014780"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"ref135","first-page":"1","article-title":"High-capacity expert binary networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bulat","year":"2020"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00448"},{"key":"ref137","first-page":"1","article-title":"Additive powers-of-two quantization: An efficient non-uniform discretization for neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li","year":"2019"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00495"},{"key":"ref139","article-title":"Joint neural architecture search and quantization","author":"Chen","year":"2018"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00907"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-08-050684-5.50008-2"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58545-7_1"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58574-7_27"},{"key":"ref144","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.1109\/FPL.2018.00059"},{"key":"ref147","article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017"},{"key":"ref148","first-page":"1","article-title":"Simple augmentation goes a long way: ADRL for DNN quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ning","year":"2020"},{"key":"ref149","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref150","first-page":"1","article-title":"WRPN: Wide reduced-precision networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mishra","year":"2018"},{"key":"ref151","first-page":"1","article-title":"AutoQ: Automated kernel-wise neural network quantization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lou","year":"2019"},{"key":"ref152","first-page":"1","article-title":"Data-efficient hierarchical reinforcement learning","volume":"31","author":"Nachum","year":"2018","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00460"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00280"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054599"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.1109\/72.248452"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1109\/DAC18074.2021.9586295"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10746266\/10509805.pdf?arnumber=10509805","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:11:11Z","timestamp":1732666271000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10509805\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":157,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3394390","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}