{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T19:51:30Z","timestamp":1774727490898,"version":"3.50.1"},"reference-count":45,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100021171","name":"Basic and Applied Basic Research Foundation of Guangdong Province","doi-asserted-by":"publisher","award":["2025A1515010142"],"award-info":[{"award-number":["2025A1515010142"]}],"id":[{"id":"10.13039\/501100021171","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62301441"],"award-info":[{"award-number":["62301441"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100019082","name":"Shanghai Aerospace Science and Technology Innovation Foundation","doi-asserted-by":"publisher","award":["SAST2023-020"],"award-info":[{"award-number":["SAST2023-020"]}],"id":[{"id":"10.13039\/501100019082","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1016\/j.neucom.2026.133191","type":"journal-article","created":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T15:55:30Z","timestamp":1772207730000},"page":"133191","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["HCDCMQ: Hessian-aware Channel Determinism-decomposition With Counterfactual Multi-agent Optimization For Channel-wise Mixed-precision Post-training Quantization"],"prefix":"10.1016","volume":"679","author":[{"given":"Wentao","family":"Xu","sequence":"first","affiliation":[]},{"given":"Jiaxiang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Ruize","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xiang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yuxuan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiang","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Weilin","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8681-3288","authenticated-orcid":false,"given":"Fengdong","family":"Qu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neucom.2026.133191_bib1","article-title":"Natural language processing advancements by deep learning: A survey","author":"Torfi","year":"2020","journal-title":"arXiv:2003.01200 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib2","doi-asserted-by":"crossref","first-page":"126","DOI":"10.1109\/MSP.2017.2765695","article-title":"Model compression and acceleration for deep neural networks: The principles, progress, and challenges","volume":"35","author":"Cheng","year":"2018","journal-title":"IEEE Signal Process. Mag."},{"key":"10.1016\/j.neucom.2026.133191_bib3","series-title":"2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"2704","article-title":"Quantization and training of neural networks for efficient integer-arithmetic-only inference","author":"Jacob","year":"2018"},{"key":"10.1016\/j.neucom.2026.133191_bib4","article-title":"A white paper on neural network quantization","author":"Nagel","year":"2021","journal-title":"arXiv:2106.08295 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib5","series-title":"2021 International Conference on Signal Processing and Machine Learning (CONF-SPML), Stanford, CA, USA","first-page":"271","article-title":"Efficient quantization techniques for deep neural networks","author":"Jiang","year":"2021"},{"key":"10.1016\/j.neucom.2026.133191_bib6","first-page":"7197","article-title":"Up or down? Adaptive rounding for post-training quantization","author":"Nagel","year":"2020","journal-title":"Proc. 37th Int. Conf. Mach. Learn. Virtual PMLR"},{"key":"10.1016\/j.neucom.2026.133191_bib7","article-title":"GPTQ: Accurate Post-Training Quantization for Generative Pre-trained Transformers","author":"Frantar","year":"2022","journal-title":"arXiv:2210.17323 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib8","article-title":"TesseraQ: Ultra Low-Bit LLM Post-Training Quantization with Block Reconstruction","author":"Li","year":"2024","journal-title":"arXiv:2402.16450 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib9","series-title":"2019 IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"293","article-title":"HAWQ: Hessian aware quantization of neural networks with mixed-precision","author":"Dong","year":"2019"},{"key":"10.1016\/j.neucom.2026.133191_bib10","article-title":"Mixed precision quantization of ConvNets via differentiable neural architecture search","author":"Wu","year":"2018","journal-title":"arXiv:1812.00090 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib11","series-title":"in: 8th International Conference on Learning Representations (ICLR 2020)","article-title":"Mixed precision DNNs: All you need is a good parametrization","author":"Uhlich","year":"2020"},{"key":"10.1016\/j.neucom.2026.133191_bib12","series-title":"2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"8604","article-title":"HAQ: Hardware-aware automated quantization with mixed precision","author":"Wang","year":"2019"},{"key":"10.1016\/j.neucom.2026.133191_bib13","article-title":"A survey on methods and theories of quantized neural networks","author":"Guo","year":"2018","journal-title":"arXiv:1808.04752 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib14","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3623402","article-title":"A comprehensive survey on model quantization for deep neural networks in image classification","volume":"14","author":"Rokh","year":"2023","journal-title":"ACM Trans. Intell. Syst. Technol."},{"key":"10.1016\/j.neucom.2026.133191_bib15","article-title":"Low-bit model quantization for deep neural networks: A survey","author":"Liu","year":"2025","journal-title":"arXiv:2505.05530 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib16","series-title":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"7383","article-title":"Mixed precision quantization of transformer language models for speech recognition","author":"Xu","year":"2021"},{"key":"10.1016\/j.neucom.2026.133191_bib17","series-title":"2023 60th ACM\/IEEE Design Automation Conference (DAC)","first-page":"1","article-title":"CSQ: Growing mixed-precision quantization scheme with bi-level continuous sparsification","author":"Xiao","year":"2023"},{"key":"10.1016\/j.neucom.2026.133191_bib18","unstructured":"M. Rakka, M.E. Fouda, P. Khargonekar, F. Kurdahi, Mixed-precision neural networks: A survey, arXiv:2208.06064 Preprint (2022), doi:10.48550\/arXiv.2208.06064."},{"key":"10.1016\/j.neucom.2026.133191_bib19","series-title":"Proceedings of the 34th International Conference on Neural Information Processing Systems","first-page":"1555","article-title":"HAWQ-V2: Hessian aware trace-weighted quantization of neural networks","author":"Dong","year":"2020"},{"key":"10.1016\/j.neucom.2026.133191_bib20","series-title":"Proceedings of the 38th International Conference on Machine Learning","first-page":"11875","article-title":"HAWQ-V3: Dyadic neural network quantization","author":"Yao","year":"2021"},{"key":"10.1016\/j.neucom.2026.133191_bib21","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2025.131000","article-title":"PLMQ: Piecewise linear mixed-precision quantization for deep neural networks","volume":"651","author":"Feng","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133191_bib22","series-title":"2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"13166","article-title":"ZeroQ: A novel zero shot quantization framework","author":"Cai","year":"2020"},{"key":"10.1016\/j.neucom.2026.133191_bib23","unstructured":"S. Li, X. Ning, K. Hong, T. Liu, L. Wang, X. Li, K. Zhong, G. Dai, H. Yang, Y. Wang, LLM-MQ: Mixed-precision quantization for efficient LLM deployment, Third Workshop on Efficient Natural Language and Speech Processing (ENLSP-III): Towards the Future of Large Language Models and their Emerging Descendants, New Orleans, NeurIPS, 2023."},{"key":"10.1016\/j.neucom.2026.133191_bib24","series-title":"Proceedings of the 42nd International Conference on Machine Learning","first-page":"25672","article-title":"SliM-LLM: Salience-driven mixed-precision quantization for large language models","author":"Huang","year":"2025"},{"key":"10.1016\/j.neucom.2026.133191_bib25","article-title":"ResQ: Mixed-precision quantization of large language models with low-rank residuals","author":"Saxena","year":"2024","journal-title":"arXiv:2412.14363 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib26","series-title":"2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"12962","article-title":"FBNetV2: Differentiable neural architecture search for spatial and channel dimensions","author":"Wan","year":"2020"},{"key":"10.1016\/j.neucom.2026.133191_bib27","unstructured":"Q. Lou, F. Guo, M. Kim, L. Liu, L. Jiang, AutoQ: Automated kernel-wise neural network quantization, 8th International Conference on Learning Representations (ICLR 2020), Addis Ababa, Ethiopia, OpenReview."},{"key":"10.1016\/j.neucom.2026.133191_bib28","series-title":"International Conference on Automated Machine Learning (AutoML 2024)","article-title":"FLIQS: One-shot mixed-precision floating-point and integer quantization search","author":"Dotzel","year":"2023"},{"key":"10.1016\/j.neucom.2026.133191_bib29","series-title":"International Conference on Automated Machine Learning (AutoML 2024)","article-title":"FLIQS: One-shot mixed-precision floating-point and integer quantization search","author":"Dotzel","year":"2023"},{"key":"10.1016\/j.neucom.2026.133191_bib30","article-title":"Channel-wise mixed-precision quantization for large language models","author":"Chen","year":"2024","journal-title":"arXiv:2410.13056v1 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib31","series-title":"2019 Fifth Workshop on Energy Efficient Machine Learning and Cognitive Computing - NeurIPS Edition (EMC2-NIPS)","first-page":"6","article-title":"Discovering low-precision networks close to full-precision networks for efficient inference","author":"McKinstry","year":"2019"},{"key":"10.1016\/j.neucom.2026.133191_bib32","series-title":"2021 IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"5330","article-title":"Towards mixed-precision quantization of neural networks via constrained optimization","author":"Chen","year":"2021"},{"key":"10.1016\/j.neucom.2026.133191_bib33","series-title":"2022 IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV)","first-page":"3665","article-title":"Hessian-aware pruning and optimal neural implant","author":"Yu","year":"2022"},{"key":"10.1016\/j.neucom.2026.133191_bib34","doi-asserted-by":"crossref","first-page":"37","DOI":"10.1109\/MM.2020.3009475","article-title":"ReLeQ: A reinforcement learning approach for automatic deep quantization of neural networks","volume":"40","author":"Elthakeb","year":"2020","journal-title":"IEEE Micro"},{"key":"10.1016\/j.neucom.2026.133191_bib35","series-title":"Proceedings of the Second International Conference on Knowledge Discovery and Data Mining","first-page":"226","article-title":"A density-based algorithm for discovering clusters in large spatial databases with noise","author":"Ester","year":"1996"},{"key":"10.1016\/j.neucom.2026.133191_bib36","doi-asserted-by":"crossref","first-page":"457","DOI":"10.1186\/s12859-022-05006-0","article-title":"GMMchi: Gene expression clustering using Gaussian mixture modeling","volume":"23","author":"Liu","year":"2022","journal-title":"BMC Bioinform"},{"key":"10.1016\/j.neucom.2026.133191_bib37","doi-asserted-by":"crossref","first-page":"129","DOI":"10.1109\/TIT.1982.1056489","article-title":"Least squares quantization in PCM","volume":"28","author":"Lloyd","year":"1982","journal-title":"IEEE Trans. Inform. Theory"},{"key":"10.1016\/j.neucom.2026.133191_bib38","series-title":"AAAI\u201918\/IAAI\u201918\/EAAI\u201918: Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence","first-page":"2974","article-title":"Counterfactual multi-agent policy gradients","author":"Foerster","year":"2018"},{"key":"10.1016\/j.neucom.2026.133191_bib39","doi-asserted-by":"crossref","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","article-title":"ImageNet large scale visual recognition challenge","volume":"115","author":"Russakovsky","year":"2015","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.neucom.2026.133191_bib40","series-title":"Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence and Thirty-Fifth Conference on Innovative Applications of Artificial Intelligence and Thirteenth Symposium on Educational Advances in Artificial Intelligence","first-page":"9029","article-title":"OMPQ: Orthogonal mixed precision quantization","author":"Ma","year":"2023"},{"key":"10.1016\/j.neucom.2026.133191_bib41","series-title":"Computer Vision \u2013 ECCV 2018","first-page":"608","article-title":"Value-aware quantization for training and inference of neural networks","author":"Park","year":"2018"},{"key":"10.1016\/j.neucom.2026.133191_bib42","article-title":"A practical mixed precision algorithm for post-training quantization","author":"Pandey","year":"2023","journal-title":"arXiv:2302.05397 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib43","article-title":"PACT: Parameterized clipping activation for quantized neural networks","author":"Choi","year":"2018","journal-title":"arXiv:1805.06085 Preprint"},{"key":"10.1016\/j.neucom.2026.133191_bib44","series-title":"International Conference on Learning Representations (ICLR 2021)","article-title":"BRECQ: Pushing the limit of post-training quantization by block reconstruction","author":"Li","year":"2021"},{"key":"10.1016\/j.neucom.2026.133191_bib45","doi-asserted-by":"crossref","first-page":"2071","DOI":"10.1109\/TCAD.2024.3363073","article-title":"GroupQ: Group-wise quantization with multi-objective optimization for CNN accelerators","volume":"43","author":"Jiang","year":"2024","journal-title":"IEEE Trans. Comput. Aided Des. Integr. Circuits Syst."}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226005886?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226005886?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T19:22:25Z","timestamp":1774725745000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231226005886"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,5]]},"references-count":45,"alternative-id":["S0925231226005886"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133191","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2026,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"HCDCMQ: Hessian-aware Channel Determinism-decomposition With Counterfactual Multi-agent Optimization For Channel-wise Mixed-precision Post-training Quantization","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133191","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"133191"}}