{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:07:16Z","timestamp":1740100036472,"version":"3.37.3"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,7,5]],"date-time":"2021-07-05T00:00:00Z","timestamp":1625443200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,5]],"date-time":"2021-07-05T00:00:00Z","timestamp":1625443200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100013209","name":"Hellenic Foundation for Research and Innovation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100013209","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,7,5]]},"DOI":"10.1109\/icme51207.2021.9428147","type":"proceedings-article","created":{"date-parts":[[2021,6,9]],"date-time":"2021-06-09T21:14:21Z","timestamp":1623273261000},"page":"1-6","source":"Crossref","is-referenced-by-count":6,"title":["Efficient Training of Lightweight Neural Networks Using Online Self-Acquired Knowledge Distillation"],"prefix":"10.1109","author":[{"given":"Maria","family":"Tzelepi","sequence":"first","affiliation":[{"name":"Aristotle University of Thessaloniki,Department of Informatics"}]},{"given":"Anastasios","family":"Tefas","sequence":"additional","affiliation":[{"name":"Aristotle University of Thessaloniki,Department of Informatics"}]}],"member":"263","reference":[{"key":"ref10","first-page":"2654","article-title":"Do deep nets really need to be deep?","volume":"27","author":"ba","year":"2014","journal-title":"Proceedings of the Advances in Neural Information Processing Systems"},{"journal-title":"Pattern Classification","year":"2012","author":"duda","key":"ref11"},{"article-title":"Large scale distributed neural network training through online distillation","year":"2018","author":"anil","key":"ref12"},{"article-title":"Feature fusion for online mutual knowledge distillation","year":"2019","author":"kim","key":"ref13"},{"key":"ref14","article-title":"Learning multiple layers of features from tiny images","author":"krizhevsky","year":"2009","journal-title":"Citeseer Tech Rep"},{"article-title":"Reading digits in natural images with unsupervised feature learning","year":"2011","author":"netzer","key":"ref15"},{"article-title":"Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms","year":"2017","author":"xiao","key":"ref16"},{"key":"ref17","first-page":"807","article-title":"Rectified linear units improve restricted boltzmann machines","author":"nair","year":"2010","journal-title":"Proceedings of the International Conference on Machine Learning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"article-title":"Distilling the knowledge in a neural network","year":"2015","author":"hinton","key":"ref4"},{"article-title":"A survey of model compression and acceleration for deep neural networks","year":"2017","author":"cheng","key":"ref3"},{"key":"ref6","article-title":"Born again neural networks","author":"furlanello","year":"2018","journal-title":"ICML"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.754"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00454"},{"key":"ref7","first-page":"284","article-title":"Self-referenced deep learning","author":"lan","year":"2018","journal-title":"Proceedings of the Asian Conference on Computer Vision"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2015.09.116"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1017\/ATSIP.2014.4"},{"key":"ref9","first-page":"7517","article-title":"Knowledge distillation by on-the-fly native ensemble","author":"lan","year":"2018","journal-title":"Proceedings of the Advances in Neural Information Processing Systems 31"}],"event":{"name":"2021 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2021,7,5]]},"location":"Shenzhen, China","end":{"date-parts":[[2021,7,9]]}},"container-title":["2021 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9428049\/9428068\/09428147.pdf?arnumber=9428147","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,3]],"date-time":"2022-08-03T00:24:16Z","timestamp":1659486256000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9428147\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7,5]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/icme51207.2021.9428147","relation":{},"subject":[],"published":{"date-parts":[[2021,7,5]]}}}