{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,9]],"date-time":"2025-10-09T16:54:54Z","timestamp":1760028894818,"version":"3.41.0"},"reference-count":40,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
Comput."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tc.2024.3517745","type":"journal-article","created":{"date-parts":[[2024,12,16]],"date-time":"2024-12-16T19:16:19Z","timestamp":1734376579000},"page":"1182-1195","source":"Crossref","is-referenced-by-count":1,"title":["A Context-Awareness and Hardware-Friendly Sparse Matrix Multiplication Kernel for CNN Inference Acceleration"],"prefix":"10.1109","volume":"74","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0086-6301","authenticated-orcid":false,"given":"Haotian","family":"Wang","sequence":"first","affiliation":[{"name":"College of Information Science and Engineering, Hunan University, Changsha, Hunan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6956-9260","authenticated-orcid":false,"given":"Yan","family":"Ding","sequence":"additional","affiliation":[{"name":"College of Computer Science and Electronic Engineering, Hunan University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0927-9039","authenticated-orcid":false,"given":"Yumeng","family":"Liu","sequence":"additional","affiliation":[{"name":"Institute of Software, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9348-4662","authenticated-orcid":false,"given":"Weichen","family":"Liu","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2372-6715","authenticated-orcid":false,"given":"Chubo","family":"Liu","sequence":"additional","affiliation":[{"name":"College of Information Science and Engineering, Hunan University, Changsha, Hunan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2681-7898","authenticated-orcid":false,"given":"Wangdong","family":"Yang","sequence":"additional","affiliation":[{"name":"College of Information Science and Engineering, Hunan University, Changsha, Hunan, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2635-7716","authenticated-orcid":false,"given":"Kenli","family":"Li","sequence":"additional","affiliation":[{"name":"College of Information Science and Engineering, Hunan University, Changsha, Hunan, China"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1109\/JIOT.2020.3038644"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1109\/TII.2021.3138752"},{"key":"ref3","first-page":"2019","article-title":"Convolution neural network for text mining and natural language processing","volume-title":"Proc. IOP Conf. Ser.: Mater. Sci. Eng.","volume":"662","author":"Widiastuti"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/CVPR46437.2021.01139"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/CANDAR53791.2021.00009"},{"issue":"1","key":"ref6","first-page":"34","article-title":"Systolic tensor array: An efficient structured-sparse GEMM accelerator for mobile cnn inference","volume-title":"IEEE Comput. Archit. Lett.","volume":"19","author":"Liu","year":"2020"},{"key":"ref7","first-page":"1","article-title":"DSSA: Dual-side sparse systolic array architecture for accelerating convolutional neural network training","volume-title":"Proc. 51st Int. Conf. Parallel Process.","author":"Chen","year":"2022"},{"key":"ref8","first-page":"743","article-title":"Uniform sparsity in deep neural networks","volume-title":"Proc. Mach. Learn. Syst.","volume":"5","author":"Muralidharan","year":"2023"},{"key":"ref9","first-page":"9398","article-title":"Balanced column-wise block pruning for maximizing GPU parallelism","volume-title":"Proc. AAAI Conf. Artif. 
Intell.","author":"Park","year":"2023"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1145\/3559009.3569691"},{"issue":"15","key":"ref11","first-page":"2373","article-title":"Conna: Configurable matrix multiplication engine for neural network acceleration","volume-title":"Electronics","volume":"11","author":"Park","year":"2022"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1145\/3489517.3530588"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/IPDPSW.2018.00091"},{"key":"ref14","first-page":"725","article-title":"Duplo: Lifting redundant memory accesses of deep neural networks for GPU tensor cores","volume-title":"Proc. 53rd Annu. IEEE\/ACM Int. Symp. Microarchit. (MICRO)","author":"Kim","year":"2020"},{"year":"2021","author":"Mishra","article-title":"Accelerating sparse deep neural networks","key":"ref15"},{"key":"ref16","first-page":"13316","article-title":"Channel permutations for N:M sparsity","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Pool","year":"2021"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1109\/ICASSP39728.2021.9413944"},{"year":"2014","author":"Chetlur","article-title":"cuDNN: Efficient primitives for deep learning","key":"ref18"},{"key":"ref19","first-page":"6726","article-title":"SPDY: Accurate pruning with speedup guarantees","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Frantar","year":"2022"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1145\/2818311"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1145\/3378176"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1109\/HPCC-DSS-SmartCity-DependSys57074.2022.00035"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref24","first-page":"12894","article-title":"Structural pruning via latency-saliency knapsack","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Shen","year":"2022"},{"year":"2020","author":"Laurent","article-title":"Revisiting loss modelling for unstructured pruning","key":"ref25"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1145\/3502181.3531463"},{"doi-asserted-by":"publisher","key":"ref27","DOI":"10.1109\/TPDS.2023.3288520"},{"key":"ref28","first-page":"30307","article-title":"Accelerating sparse convolution with column vector-wise sparsity","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Tan","year":"2022"},{"year":"2021","author":"Wang","article-title":"TC-GNN: Accelerating sparse graph neural network computation via dense tensor core on GPUs","key":"ref29"},{"key":"ref30","first-page":"513","article-title":"Efficient GPU kernels for N: M-sparse weights in deep learning","volume-title":"Proc. Mach. Learn. Syst.","volume":"5","author":"Lin","year":"2023"},{"doi-asserted-by":"publisher","key":"ref31","DOI":"10.1609\/aaai.v33i01.33015676"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1109\/HPEC55821.2022.9926300"},{"year":"2024","author":"D\u2019Alberto","article-title":"Weight block sparsity: Training, compilation, and AI engine accelerators","key":"ref33"},{"doi-asserted-by":"publisher","key":"ref34","DOI":"10.1145\/3581784.3607087"},{"key":"ref35","first-page":"20721","article-title":"DominoSearch: Find layer-wise fine-grained N: M sparse schemes from dense neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Sun","year":"2021"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1007\/978-3-031-25082-8_9"},{"key":"ref37","first-page":"10323","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Frantar","year":"2023"},{"doi-asserted-by":"publisher","key":"ref38","DOI":"10.1109\/HPCA53966.2022.00049"},{"doi-asserted-by":"publisher","key":"ref39","DOI":"10.1109\/HPCA56546.2023.10071058"},{"key":"ref40","article-title":"Dynamic sparsity is channel-level sparsity learner","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Yin","year":"2024"}],"container-title":["IEEE Transactions on Computers"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/12\/10924434\/10803013.pdf?arnumber=10803013","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,11]],"date-time":"2025-06-11T05:26:01Z","timestamp":1749619561000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10803013\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":40,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tc.2024.3517745","relation":{},"ISSN":["0018-9340","1557-9956","2326-3814"],"issn-type":[{"type":"print","value":"0018-9340"},{"type":"electronic","value":"1557-9956"},{"type":"electronic","value":"2326-3814"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}