{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,8]],"date-time":"2026-01-08T00:20:04Z","timestamp":1767831604473,"version":"3.49.0"},"reference-count":58,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T00:00:00Z","timestamp":1666915200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T00:00:00Z","timestamp":1666915200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No.61976098"],"award-info":[{"award-number":["No.61976098"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Science and Technology Development Foundation of Quanzhou City","award":["No.2020C067"],"award-info":[{"award-number":["No.2020C067"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Process Lett"],"published-print":{"date-parts":[[2023,8]]},"DOI":"10.1007\/s11063-022-11058-3","type":"journal-article","created":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T11:07:15Z","timestamp":1666955235000},"page":"4661-4678","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["ReLP: Reinforcement Learning Pruning Method Based on Prior 
Knowledge"],"prefix":"10.1007","volume":"55","author":[{"given":"Weiwei","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Ming","family":"Ji","sequence":"additional","affiliation":[]},{"given":"Haoran","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Chenghui","family":"Zhen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,28]]},"reference":[{"issue":"8","key":"11058_CR1","doi-asserted-by":"publisher","first-page":"1655","DOI":"10.1109\/JPROC.2019.2921977","volume":"107","author":"J Chen","year":"2019","unstructured":"Chen J, Ran X (2019) Deep learning with edge computing: a review. Proc IEEE 107(8):1655\u20131674","journal-title":"Proc IEEE"},{"key":"11058_CR2","unstructured":"Dauphin YN, Bengio Y (2013) Big neural networks waste capacity. In: 1st international conference on learning representations, Scottsdale, Arizona, USA"},{"key":"11058_CR3","unstructured":"Frankle J, Carbin M J (2019) The lottery ticket hypothesis: finding sparse, trainable neural networks. In: 7th international conference on learning representations, New Orleans, LA, USA"},{"issue":"4","key":"11058_CR4","doi-asserted-by":"publisher","first-page":"485","DOI":"10.1109\/JPROC.2020.2976475","volume":"108","author":"L Deng","year":"2020","unstructured":"Deng L, Li G, Han S et al (2020) Model compression and hardware acceleration for neural networks: a comprehensive survey. Proc IEEE 108(4):485\u2013532","journal-title":"Proc IEEE"},{"key":"11058_CR5","unstructured":"Iandola FN, Han S, Moskewicz MW, et al (2016) SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size. arxiv abs\/1602.07360"},{"key":"11058_CR6","unstructured":"Howard AG, Zhu M, Chen B, et al (2017) MobileNets: efficient convolutional neural networks for mobile vision applications. 
CoRR abs\/1704.04861"},{"key":"11058_CR7","doi-asserted-by":"crossref","unstructured":"Zhang X, Zhou X, Lin M, et al (2018) ShuffleNet: an extremely efficient convolutional neural network for mobile devices. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, Salt Lake city, UT, USA, pp 6848\u20136856","DOI":"10.1109\/CVPR.2018.00716"},{"key":"11058_CR8","doi-asserted-by":"crossref","unstructured":"Han K, Wang Y, Tian Q, et al (2020) GhostNet: more features from cheap operations. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 1577\u20131586.","DOI":"10.1109\/CVPR42600.2020.00165"},{"key":"11058_CR9","doi-asserted-by":"crossref","unstructured":"Gordon A, Eban E, Nachum O, et al (2018) MorphNet: fast & simple resource-constrained structure learning of deep networks. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, Salt Lake city, UT, USA, pp 1586\u20131595","DOI":"10.1109\/CVPR.2018.00171"},{"issue":"2","key":"11058_CR10","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1109\/MSP.2013.2297439","volume":"32","author":"A Cichocki","year":"2015","unstructured":"Cichocki A, Mandic D, Lathauwer LD et al (2015) Tensor decompositions for signal processing applications from two-way to multiway component analysis. IEEE Signal Process Mag 32(2):145\u2013163","journal-title":"IEEE Signal Process Mag"},{"key":"11058_CR11","doi-asserted-by":"crossref","unstructured":"Ye J, Wang L, Li G, et al (2018) learning compact recurrent neural networks with block-term tensor decomposition. 
In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, Salt Lake City, UT, USA, pp 9378\u20139387","DOI":"10.1109\/CVPR.2018.00977"},{"issue":"7","key":"11058_CR12","doi-asserted-by":"publisher","first-page":"5174","DOI":"10.1109\/TGRS.2019.2897316","volume":"57","author":"J Xue","year":"2019","unstructured":"Xue J, Zhao Y, Liao W et al (2019) Nonlocal low-rank regularized tensor decomposition for hyperspectral image denoising. IEEE Trans Geosci Remote Sens 57(7):5174\u20135189","journal-title":"IEEE Trans Geosci Remote Sens"},{"key":"11058_CR13","unstructured":"Gong Y, Liu L, Ming Y, et al (2014) Compressing deep convolutional networks using vector quantization. CoRR abs\/1412.6115"},{"key":"11058_CR14","unstructured":"Khoram S, Jing L (2018) Adaptive quantization of neural networks. In: 6th international conference on learning representations, Vancouver, BC, Canada"},{"key":"11058_CR15","doi-asserted-by":"crossref","unstructured":"Wang K, Liu Z, Lin Y, et al (2019) HAQ: Hardware-aware automated quantization with mixed precision. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition, Long Beach, CA, USA, pp 8604\u20138612","DOI":"10.1109\/CVPR.2019.00881"},{"key":"11058_CR16","unstructured":"Lou Q, Guo F, Liu L, et al (2020) AutoQ: automated kernel-wise neural network quantization. In: 8th international conference on learning representations, Addis Ababa, Ethiopia"},{"key":"11058_CR17","unstructured":"Hinton G, Vinyals O, Dean J (2015) Distilling the knowledge in a neural network. CoRR abs\/1503.02531."},{"key":"11058_CR18","doi-asserted-by":"crossref","unstructured":"Wang J, Bao W, Sun L, et al (2019) Private Model Compression via Knowledge Distillation. 
In: The thirty-third AAAI conference on artificial intelligence, Honolulu, Hawaii, USA, pp 1190\u20131197","DOI":"10.1609\/aaai.v33i01.33011190"},{"key":"11058_CR19","doi-asserted-by":"crossref","unstructured":"Walawalkar D, Shen Z, Savvides M (2020) Online ensemble model compression using knowledge distillation. In: European conference on computer vision, Glasgow, UK, pp 18\u201335","DOI":"10.1007\/978-3-030-58529-7_2"},{"key":"11058_CR20","unstructured":"Hu H, Peng R, Tai YW, et al (2016) Network trimming: a data-driven neuron pruning approach towards efficient deep architectures. CoRR abs\/1607.03250."},{"key":"11058_CR21","unstructured":"Zhuang L, Li J, Shen Z, et al (2017) learning efficient convolutional networks through network slimming. In: 2017 IEEE international conference on computer vision, Venice, Italy, pp 2755\u20132763"},{"key":"11058_CR22","unstructured":"Zhuang L, Sun M, Zhou T, et al (2018). Rethinking the value of network pruning. In: 7th international conference on learning representations, New Orleans, LA, USA."},{"key":"11058_CR23","doi-asserted-by":"crossref","unstructured":"Li Y, Gu S, Mayer C, et al (2020) Group sparsity: the hinge between filter pruning and decomposition for network compression. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 8015\u20138024","DOI":"10.1109\/CVPR42600.2020.00804"},{"key":"11058_CR24","doi-asserted-by":"crossref","unstructured":"Guo J, Ouyang W, Xu D (2020) Multi-dimensional pruning: a unified framework for model compression. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 1505\u20131514","DOI":"10.1109\/CVPR42600.2020.00158"},{"key":"11058_CR25","unstructured":"Anwar S, Sung W (2016) Compact deep convolutional neural networks with coarse pruning. 
CoRR abs\/1610.09639"},{"key":"11058_CR26","unstructured":"Song H, Mao H, Dally W J (2016) Deep compression: compressing deep neural networks with pruning, trained quantization and Huffman coding. In: 4th international conference on learning representations, San Juan, Puerto Rico"},{"key":"11058_CR27","unstructured":"Goodfellow I, Pouget-Abadie J, Mirza M, et al (2014) Generative adversarial nets. In: 27th international conference on neural information processing systems, Montreal, Canada, pp 2672\u20132680"},{"key":"11058_CR28","doi-asserted-by":"crossref","unstructured":"Lin S, Ji R, Yan C, et al (2019) Towards optimal structured CNN pruning via generative adversarial learning. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition, Long Beach, CA, USA, pp 2785\u20132794","DOI":"10.1109\/CVPR.2019.00290"},{"issue":"1\u20133","key":"11058_CR29","doi-asserted-by":"publisher","first-page":"489","DOI":"10.1016\/j.neucom.2005.12.126","volume":"70","author":"GB Huang","year":"2006","unstructured":"Huang GB, Zhu QY, Siew CK (2006) Extreme learning machine: Theory and applications. Neurocomptuing 70(1\u20133):489\u2013501","journal-title":"Neurocomptuing"},{"issue":"13","key":"11058_CR30","doi-asserted-by":"publisher","first-page":"8925","DOI":"10.1016\/j.jfranklin.2020.04.033","volume":"357","author":"J Zhang","year":"2020","unstructured":"Zhang J, Li YJ, Xiao WD et al (2020) Non-iterative and fast deep learning: multilayer extreme learning machines. J Franklin Inst 357(13):8925\u20138955","journal-title":"J Franklin Inst"},{"key":"11058_CR31","doi-asserted-by":"crossref","unstructured":"He Y, Liu P, Wang Z, et al (2019) Filter pruning via geometric median for deep convolutional neural networks acceleration. 
In: 2019 IEEE\/CVF conference on computer vision and pattern recognition, Long Beach, CA, USA, pp 4335\u20134344","DOI":"10.1109\/CVPR.2019.00447"},{"key":"11058_CR32","doi-asserted-by":"crossref","unstructured":"Lin M, Ji R, Wang Y, et al (2020) HRank: filter pruning using high-rank feature map. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 1526\u20131535","DOI":"10.1109\/CVPR42600.2020.00160"},{"key":"11058_CR33","doi-asserted-by":"crossref","unstructured":"He Y, Lin J, Liu Z, et al (2018) AMC: AutoML for model compression and acceleration on mobile devices. In: european conference on computer vision, Munich, Germany, pp 815-832","DOI":"10.1007\/978-3-030-01234-2_48"},{"key":"11058_CR34","doi-asserted-by":"crossref","unstructured":"Chin TW, Ding R, Zhang C, et al (2020) Towards efficient model compression via learned global ranking. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 1515\u20131525","DOI":"10.1109\/CVPR42600.2020.00159"},{"key":"11058_CR35","unstructured":"Li H, Kadav A, Durdanovic I, et al (2017) Pruning filters for efficient ConvNets. In: 5th international conference on learning representations, Toulon, France."},{"issue":"8","key":"11058_CR36","doi-asserted-by":"publisher","first-page":"3594","DOI":"10.1109\/TCYB.2019.2933477","volume":"50","author":"Y He","year":"2019","unstructured":"He Y, Dong X, Kang G et al (2019) Asymptotic soft filter pruning for deep convolutional neural networks. IEEE Trans Cybern 50(8):3594\u20133604","journal-title":"IEEE Trans Cybern"},{"key":"11058_CR37","doi-asserted-by":"crossref","unstructured":"Luo JH, Wu J, Lin W (2017) ThiNet: a filter level pruning method for deep neural network compression. 
In: 2017 IEEE international conference on computer vision, Venice, Italy, pp 5068\u20135076","DOI":"10.1109\/ICCV.2017.541"},{"key":"11058_CR38","doi-asserted-by":"crossref","unstructured":"Lin S, Ji R, Li Y, et al (2018) Accelerating convolutional networks via global & dynamic filter pruning. In: twenty-seventh international joint conference on artificial intelligence, Stockholm, Sweden, pp 2425-2432","DOI":"10.24963\/ijcai.2018\/336"},{"key":"11058_CR39","doi-asserted-by":"crossref","unstructured":"Liu Z, Mu H, X Zhang, et al (2019) MetaPruning: meta learning for automatic neural network channel pruning. In: 2019 IEEE\/CVF international conference on computer vision, Seoul, Korea (South), pp 3295\u20133304.","DOI":"10.1109\/ICCV.2019.00339"},{"key":"11058_CR40","doi-asserted-by":"crossref","unstructured":"Liu N, Ma X, Xu Z, et al (2020) AutoCompress: an automatic dnn structured pruning framework for ultra-high compression rates. In: The thirty-fourth AAAI conference on artificial intelligence, New York, NY, USA, pp 4876-4883","DOI":"10.1609\/aaai.v34i04.5924"},{"issue":"3","key":"11058_CR41","doi-asserted-by":"publisher","first-page":"1259","DOI":"10.1109\/JIOT.2020.3034925","volume":"8","author":"F Yu","year":"2021","unstructured":"Yu F, Cui L, Wang P et al (2021) EasiEdge: a novel global deep neural networks pruning method for efficient edge computing. IEEE Internet Things J 8(3):1259\u20131271","journal-title":"IEEE Internet Things J"},{"key":"11058_CR42","doi-asserted-by":"crossref","unstructured":"Guo S, Wang Y, Li Q, et al (2020) DMCP: Differentiable Markov channel pruning for neural networks. 
In: 2020 IEEE\/CVF conference on computer vision and pattern recognition, Seattle, WA, USA, pp 1536\u20131544","DOI":"10.1109\/CVPR42600.2020.00161"},{"issue":"4","key":"11058_CR43","doi-asserted-by":"publisher","first-page":"727","DOI":"10.1109\/JSTSP.2020.2977090","volume":"14","author":"J Wang","year":"2020","unstructured":"Wang J, Bai H, Wu J et al (2020) bayesian automatic model compression. IEEE J Sel Topics Signal Process 14(4):727\u2013736","journal-title":"IEEE J Sel Topics Signal Process"},{"key":"11058_CR44","doi-asserted-by":"crossref","unstructured":"Wang Z, Taylor ME (2019) Interactive reinforcement learning with dynamic reuse of prior knowledge from human and agent demonstrations. In: twenty-eighth international joint conference on artificial intelligence, Macao, China, pp 3820\u20133827","DOI":"10.24963\/ijcai.2019\/530"},{"key":"11058_CR45","unstructured":"Krizhevsky A, Nair V, Hinton G. The CIFAR-10 Dataset. http:\/\/www.cs.toronto.edu\/kriz\/cifar.html\/"},{"key":"11058_CR46","unstructured":"Jia D, Wei D, Socher R, et al (2009) ImageNet: a large-scale hierarchical image database. In: 2009 ieee conference on computer vision and pattern recognition, Miami, FL, USA, pp 248-255"},{"key":"11058_CR47","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. In: 3rd international conference on learning representations, San Diego, CA, USA"},{"key":"11058_CR48","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, et al (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition, Las Vegas, NV, USA, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"11058_CR49","doi-asserted-by":"crossref","unstructured":"Sandler M, Howard A, Zhu M, et al (2018) MobileNetV2: inverted residuals and linear bottlenecks. 
In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, Salt Lake City, UT, USA, pp 4510\u20134520","DOI":"10.1109\/CVPR.2018.00474"},{"issue":"3","key":"11058_CR50","doi-asserted-by":"publisher","first-page":"639","DOI":"10.1007\/s11554-022-01209-z","volume":"19","author":"XZ Xu","year":"2022","unstructured":"Xu XZ, Chen J, Su HY et al (2022) Towards efficient filter pruning via topology. J Real-Time Image Proc 19(3):639\u2013649","journal-title":"J Real-Time Image Proc"},{"key":"11058_CR51","doi-asserted-by":"crossref","unstructured":"Zhan H, Lin W M, Cao Y (2021) Deep model compression via two-stage deep reinforcement learning. In: European conference on machine learning and principles and practice of knowledge discovery in databases, Bilbao, Spain, pp 238\u2013254","DOI":"10.1007\/978-3-030-86486-6_15"},{"issue":"8","key":"11058_CR52","doi-asserted-by":"publisher","first-page":"1738","DOI":"10.1109\/JPROC.2019.2918951","volume":"107","author":"Z Zhou","year":"2019","unstructured":"Zhou Z, Chen X, Li E et al (2019) Edge intelligence: paving the last mile of artificial intelligence with edge computing. Proc IEEE 107(8):1738\u20131762","journal-title":"Proc IEEE"},{"key":"11058_CR53","unstructured":"Cheng Y , Wang D , Zhou P , et al (2017) A Survey of model compression and acceleration for deep neural networks. CoRR abs\/1710.09282."},{"issue":"3","key":"11058_CR54","doi-asserted-by":"publisher","first-page":"7652","DOI":"10.1109\/LRA.2022.3184805","volume":"7","author":"Q Yang","year":"2022","unstructured":"Yang Q, Stork JA, Stoyanov T (2022) MPR-RL: multi-prior regularized reinforcement learning for knowledge transfer. IEEE Robot Autom Lett 7(3):7652\u20137659","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"11058_CR55","first-page":"72","volume":"8","author":"XX Chen","year":"2022","unstructured":"Chen XX, Huang KH, Liang XX et al (2022) Tactical prior knowledge inspiring multi-agent bilevel reinforcement learning. 
J Command Control 8(1):72\u201379","journal-title":"J Command Control"},{"key":"11058_CR56","doi-asserted-by":"crossref","unstructured":"Lin M, Cao L, Li S, et al (2021) Filter sketch for network pruning. IEEE Trans Neural Netw Learn Syst 1\u201310","DOI":"10.1109\/TNNLS.2022.3156047"},{"key":"11058_CR57","doi-asserted-by":"publisher","first-page":"344","DOI":"10.1109\/LSP.2021.3054315","volume":"28","author":"G Tian","year":"2021","unstructured":"Tian G, Chen J, Zeng X et al (2021) Pruning by training: a novel deep neural network compression framework for image processing. IEEE Signal Process Lett 28:344\u2013348","journal-title":"IEEE Signal Process Lett"},{"key":"11058_CR58","doi-asserted-by":"crossref","unstructured":"Huang Z, Wang N (2018) Data-driven sparse structure selection for deep neural networks. In: European conference on computer vision, Munich, Germany, pp 317\u2013334","DOI":"10.1007\/978-3-030-01270-0_19"}],"container-title":["Neural Processing Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-11058-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11063-022-11058-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-11058-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,31]],"date-time":"2023-07-31T16:46:56Z","timestamp":1690822016000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11063-022-11058-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,28]]},"references-count":58,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,8]]}},"alternative-id":["11058"],"URL":"https:\/\/doi.org\/10.1007\/s11063-022-11058-3","relation":{},"ISSN":["1370-4621","1573-773X"],"issn-type":[{"value":"1370-4621","type":"print"},{"value":"1573-773X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10,28]]},"assertion":[{"value":"16 October 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 October 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}