{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T14:08:46Z","timestamp":1758809326676},"reference-count":38,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2022,10,21]],"date-time":"2022-10-21T00:00:00Z","timestamp":1666310400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,10,21]],"date-time":"2022-10-21T00:00:00Z","timestamp":1666310400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100013209","name":"Hellenic Foundation for Research and Innovation","doi-asserted-by":"publisher","award":["5631"],"award-info":[{"award-number":["5631"]}],"id":[{"id":"10.13039\/501100013209","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1007\/s10489-022-04195-8","type":"journal-article","created":{"date-parts":[[2022,10,21]],"date-time":"2022-10-21T03:29:41Z","timestamp":1666322981000},"page":"14102-14127","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Model reduction of feed forward neural networks for resource-constrained devices"],"prefix":"10.1007","volume":"53","author":[{"given":"Evangelia","family":"Fragkou","sequence":"first","affiliation":[]},{"given":"Marianna","family":"Koultouki","sequence":"additional","affiliation":[]},{"given":"Dimitrios","family":"Katsaros","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,21]]},"reference":[{"key":"4195_CR1","unstructured":"Barabasi A-L (2016) Network Science Cambridge 
University Press"},{"issue":"5439","key":"4195_CR2","doi-asserted-by":"publisher","first-page":"509","DOI":"10.1126\/science.286.5439.509","volume":"286","author":"A-L Barabasi","year":"1999","unstructured":"Barabasi A-L, Albert R (1999) Emergence of scaling in random networks. Science 286 (5439):509\u2013512","journal-title":"Science"},{"issue":"4","key":"4195_CR3","first-page":"26","volume":"46","author":"P Basaras","year":"2013","unstructured":"Basaras P, Katsaros D, Tassiulas L (2013) Detecting influential spreaders in complex, dynamic networks. IEEE Comp Magazine 46(4):26\u201331","journal-title":"IEEE Comp Magazine"},{"key":"4195_CR4","doi-asserted-by":"publisher","first-page":"186","DOI":"10.1038\/nrn2575","volume":"10","author":"E Bullmore","year":"2009","unstructured":"Bullmore E, Sporns O (2009) Complex brain networks: graph theoretical analysis of structural and functional systems. Nature Rev Neuroscience 10:186\u2013198","journal-title":"Nature Rev Neuroscience"},{"key":"4195_CR5","unstructured":"Cai H, Gan C, Zhu L, Han S (2020) TinyTL: reduce memory, not parameters for efficient on-device learning. In: Proceedings of the conference on neural information processing systems (NeurIPS"},{"key":"4195_CR6","doi-asserted-by":"publisher","first-page":"17787","DOI":"10.1007\/s00500-020-05302-y","volume":"24","author":"L Cavallaro","year":"2020","unstructured":"Cavallaro L, Bagdasar O, Meo PD, Fiumara G, Liotta A (2020) Artificial neural networks training acceleration through network science strategies. Soft Comput 24:17787\u201317795","journal-title":"Soft Comput"},{"key":"4195_CR7","doi-asserted-by":"crossref","unstructured":"Chouliaras A, Fragkou E, Katsaros D (2021) Feed forward neural network sparsification with dynamic pruning. 
In: Proceedings of the panhellenic conference on informatics (PCI)","DOI":"10.1145\/3503823.3503826"},{"key":"4195_CR8","doi-asserted-by":"crossref","unstructured":"Diao H, Li G, Hao Y (2022) PA-NAS: partial operation activation for memory-efficient architecture search. Appl Intell. To appear","DOI":"10.1007\/s10489-021-02961-8"},{"key":"4195_CR9","doi-asserted-by":"publisher","first-page":"16279","DOI":"10.1007\/s00521-020-05161-6","volume":"32","author":"O Erkaymaz","year":"2020","unstructured":"Erkaymaz O (2020) Resilient back-propagation approach in small-world feed-forward neural network topology based on newman-watts algorithm. Neural Comput Applic 32:16279\u201316289","journal-title":"Neural Comput Applic"},{"key":"4195_CR10","unstructured":"Frankle J, Carbin M (2019) The lottery ticket hypothesis: finding sparse, trainable neural networks. In: Proceedings of the international conference on learning representations (ICLR)"},{"key":"4195_CR11","unstructured":"Goodfellow I, Bengio Y, Courville A (2016) Deep learning. The MIT Press"},{"key":"4195_CR12","unstructured":"Han S, Pool J, Tran J, Dally W (2015) Learning both weights and connections for efficient neural network. In: Proceedings of advances in neural information processing systems, pp 1135\u20131143"},{"key":"4195_CR13","unstructured":"Han S, Mao H, Dally WJ (2016) Deep compression: compressing deep neural networks with pruning, trained quantization and Huffman coding. In: Proceedings of the international conference on learning representations (ICLR)"},{"key":"4195_CR14","doi-asserted-by":"crossref","unstructured":"Hao J, Cai Z, Li R, Zhu WW (2021) Saliency: a new selection criterion of important architectures in neural architecture search. Neural Comput Appl. 
To appear","DOI":"10.1007\/s00521-021-06418-4"},{"key":"4195_CR15","first-page":"1","volume":"23","author":"T Hoefler","year":"2021","unstructured":"Hoefler T, Alistarh D, Ben-Nun T, Dryden N, Peste A (2021) Sparsity in deep learning: pruning and growth for efficient inference and training in neural networks. J Mach Learn Res 23:1\u2013124","journal-title":"J Mach Learn Res"},{"key":"4195_CR16","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1016\/0031-3203(91)90074-F","volume":"24","author":"Z-Q Hong","year":"1991","unstructured":"Hong Z-Q, Yang J-Y (1991) Optimal discriminant plane for a small number of samples and design method of classifier on the plane. Pattern Recogn 24:317\u2013324","journal-title":"Pattern Recogn"},{"key":"4195_CR17","doi-asserted-by":"crossref","unstructured":"Iiduka H (2022) Appropriate learning rates of adaptive learning rate optimization algorithms for training deep neural networks. IEEE Trans Cybern. To appear","DOI":"10.1109\/TCYB.2021.3107415"},{"key":"4195_CR18","unstructured":"James AP, Dimitrijev S (2012) Feature selection using nearest attributes. Available at: arXiv:1201.5946"},{"issue":"9","key":"4195_CR19","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1145\/3154484","volume":"61","author":"NP Jouppi","year":"2018","unstructured":"Jouppi NP, Young C, Patil N, Patterson D (2018) Domain-specific architecture for deep neural networks. Commun ACM 61(9):50\u201359","journal-title":"Commun ACM"},{"key":"4195_CR20","unstructured":"Liebenwein L, Baykal C, Carter B, Gifford D, Rus D (2021) Lost in pruning: the effects of pruning neural networks beyond test accuracy. 
In: Proceedings of the machine learning systems conference (MLSys)"},{"key":"4195_CR21","doi-asserted-by":"publisher","first-page":"2589","DOI":"10.1007\/s00521-020-05136-7","volume":"33","author":"S Liu","year":"2020","unstructured":"Liu S, Mocanu DC, Matavalam ARR, Pei Y, Pechenizkiy M (2020) Sparse evolutionary deep learning with over one million artificial neurons on commodity hardware. Neural Comput Applic 33:2589\u20132604","journal-title":"Neural Comput Applic"},{"key":"4195_CR22","doi-asserted-by":"crossref","unstructured":"Mocanu DC, Mocanu E, Stone P, Nguyen PH, Gibesce M, Liotta A (2018) Scalable training of artificial neural networks with adaptive sparse connectivity inspired by network science. Nat Commun, pp 9","DOI":"10.1038\/s41467-018-04316-3"},{"key":"4195_CR23","first-page":"3151","volume":"16","author":"A Mokhtari","year":"2015","unstructured":"Mokhtari A, Ribeiro A (2015) Global convergence of online limited memory BFGS. J Mach Learn Res 16:3151\u20133181","journal-title":"J Mach Learn Res"},{"key":"4195_CR24","unstructured":"Narang S, Diamos G, Sengupta S, Elsen E (2017) Exploring sparsity in recurrent neural networks. In: Proceedings of the international conference on learning representations (ICLR)"},{"key":"4195_CR25","unstructured":"Nene SA, Nayar SK, Murase H (1996) Columbia object image library (COIL-20). Technical report CUCS-006-96 Columbia University"},{"key":"4195_CR26","doi-asserted-by":"crossref","unstructured":"Papakostas D, Kasidakis T, Fragkou E, Katsaros D (2021) Backbones for internet of battlefield things. In: Proceedings of the IEEE\/IFIP annual conference on wireless on-demand network systems and services (WONS)","DOI":"10.23919\/WONS51326.2021.9415560"},{"key":"4195_CR27","doi-asserted-by":"crossref","unstructured":"Qiu S, Xu X, Cai B (2019) FReLU: flexible rectified linear units for improving convolutional neural networks. 
Available at arXiv:1706.08098","DOI":"10.1109\/ICPR.2018.8546022"},{"key":"4195_CR28","doi-asserted-by":"crossref","unstructured":"Ray PP (2022) A review on tinyML: state-of-the-art and prospects. J King Saud University\u2013 Comput Inf Sci, To appear","DOI":"10.1016\/j.jksuci.2021.11.019"},{"key":"4195_CR29","unstructured":"Reddi SJ, Kale S, Kumar S (2018) On the convergence of Adam and beyond. In: Proceedings of the international conference on learning representations (ICLR)"},{"issue":"76","key":"4195_CR30","first-page":"1","volume":"54","author":"P Ren","year":"2021","unstructured":"Ren P, Xiao Y, Chang X, Huang P-Y, Li Z, Chen X, Wang W (2021) A comprehensive survey of neural architecture search: challenges and solutions. ACM Comput Surv 54(76):1\u201334","journal-title":"ACM Comput Surv"},{"key":"4195_CR31","unstructured":"Renda A, Frankle J, Carbin M (2020) Comparing rewinding and fine-tuning in neural network pruning. In: Proceedings of the international conference on learning representations (ICLR)"},{"issue":"1","key":"4195_CR32","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava N, Hinton G, Krizhevsky A, Sutskever I, Salakhutdinov R (2014) Dropout: a simple way to prevent neural networks from overfitting. J Mach Learn Res 15(1):1929\u20131958","journal-title":"J Mach Learn Res"},{"key":"4195_CR33","first-page":"3299","volume":"70","author":"X Sun","year":"2017","unstructured":"Sun X, Ren X, Ma S, Wang H (2017) meProp: sparsified back propagation for accelerated deep learning with reduced overfitting. Proc Mach Learn Res 70:3299\u20133308","journal-title":"Proc Mach Learn Res"},{"key":"4195_CR34","doi-asserted-by":"crossref","unstructured":"Sun X, Ren X, Ma S, Wei B, Li W, Xu J, Wang H, Zhang Y (2019) Training simplification and model simplification for deep learning: a minimal effort back propagation method. IEEE Trans Knowl Data Eng, A minimal effort back propagation method. 
IEEE Transactions on Knowledge and Data Engineering, Training simplification and model simplification for deep learning. To appear","DOI":"10.1109\/TKDE.2018.2883613"},{"key":"4195_CR35","doi-asserted-by":"crossref","unstructured":"Wang X, Zheng Z, He Y, Yan F, qiang Zeng Z, Yang Y (2021) Soft person reidentification network pruning via blockwise adjacent filter decaying. IEEE Trans Cybern","DOI":"10.1109\/TCYB.2021.3130047"},{"key":"4195_CR36","unstructured":"Xiao H, Rasul K, Vollgraf R (2017) Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv:1708.07747"},{"key":"4195_CR37","doi-asserted-by":"publisher","first-page":"7409","DOI":"10.1007\/s00521-021-05828-8","volume":"33","author":"S Xu","year":"2021","unstructured":"Xu S, Chen H, Gong X, Liu K, Lu J, Zhang B (2021) Efficient structured pruning based on deep feature stabilization. Neural Comput Applic 33:7409\u20137420","journal-title":"Neural Comput Applic"},{"key":"4195_CR38","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1016\/j.jpdc.2017.02.006","volume":"106","author":"A Zlateski","year":"2017","unstructured":"Zlateski A, Lee K, Seung HS (2017) Scalable training of 3d convolutional networks on multi- and many-cores. 
J Parallel Distrib Comput 106:195\u2013204","journal-title":"J Parallel Distrib Comput"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-04195-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-04195-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-04195-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,5,31]],"date-time":"2023-05-31T10:32:48Z","timestamp":1685529168000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-04195-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,21]]},"references-count":38,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2023,6]]}},"alternative-id":["4195"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-04195-8","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10,21]]},"assertion":[{"value":"21 September 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 October 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The authors declare that they have no known conflicting\/competing financial or non-financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}}]}}