{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2023,12,20]],"date-time":"2023-12-20T00:47:47Z","timestamp":1703033267571},"reference-count":30,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2022,11,10]],"date-time":"2022-11-10T00:00:00Z","timestamp":1668038400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,11,10]],"date-time":"2022-11-10T00:00:00Z","timestamp":1668038400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2023,4]]},"DOI":"10.1007\/s00530-022-01021-6","type":"journal-article","created":{"date-parts":[[2022,11,10]],"date-time":"2022-11-10T15:04:31Z","timestamp":1668092671000},"page":"787-796","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Layer-fusion for online mutual knowledge distillation"],"prefix":"10.1007","volume":"29","author":[{"given":"Gan","family":"Hu","sequence":"first","affiliation":[]},{"given":"Yanli","family":"Ji","sequence":"additional","affiliation":[]},{"given":"Xingzhu","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Yuexing","family":"Han","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,10]]},"reference":[{"issue":"7","key":"1021_CR1","first-page":"38","volume":"14","author":"G Hinton","year":"2015","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. Comput. Sci. 14(7), 38\u201339 (2015)","journal-title":"Comput. 
Sci."},{"key":"1021_CR2","unstructured":"Romero, A., Ballas, N., Kahou, S.E., Chassang, A., Gatta, C., Bengio, Y.: Fitnets: hints for thin deep nets. Comput. Sci. (2015)"},{"key":"1021_CR3","unstructured":"Phuong, M., Lampert, C.: Towards understanding knowledge distillation. In: International Conference on Machine Learning, pp. 5142\u20135151 (2019). PMLR"},{"key":"1021_CR4","unstructured":"Komodakis, N., Zagoruyko, S.: Paying more attention to attention: improving the performance of convolutional neural networks via attention transfer. In: ICLR (2017)"},{"key":"1021_CR5","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Xiang, T., Hospedales, T.M., Lu, H.: Deep mutual learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4320\u20134328 (2018)","DOI":"10.1109\/CVPR.2018.00454"},{"key":"1021_CR6","unstructured":"Song, G., Chai, W.: Collaborative learning for deep neural networks. Adv. Neural Inf. Process. Syst. 31 (2018)"},{"key":"1021_CR7","unstructured":"Anil, R., Pereyra, G., Passos, A., Ormandi, R., Dahl, G.E., Hinton, G.E.: Large scale distributed neural network training through online distillation. In: International Conference on Learning Representations (2018)"},{"key":"1021_CR8","unstructured":"Zhu, X., Gong, S., et al.: Knowledge distillation by on-the-fly native ensemble. Adv. Neural Inf. Process. Syst. 31 (2018)"},{"key":"1021_CR9","doi-asserted-by":"crossref","unstructured":"Chen, D., Mei, J.-P., Wang, C., Feng, Y., Chen, C.: Online knowledge distillation with diverse peers. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 3430\u20133437 (2020)","DOI":"10.1609\/aaai.v34i04.5746"},{"key":"1021_CR10","doi-asserted-by":"crossref","unstructured":"Hou, S., Liu, X., Wang, Z.: Dualnet: learn complementary features for image recognition. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 
502\u2013510 (2017)","DOI":"10.1109\/ICCV.2017.62"},{"key":"1021_CR11","doi-asserted-by":"crossref","unstructured":"Kim, J., Hyun, M., Chung, I., Kwak, N.: Feature fusion for online mutual knowledge distillation. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 4619\u20134625 (2021). IEEE","DOI":"10.1109\/ICPR48806.2021.9412615"},{"key":"1021_CR12","doi-asserted-by":"crossref","unstructured":"Park, W., Kim, D., Lu, Y., Cho, M.: Relational knowledge distillation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3967\u20133976 (2019)","DOI":"10.1109\/CVPR.2019.00409"},{"key":"1021_CR13","doi-asserted-by":"crossref","unstructured":"Zhang, L., Song, J., Gao, A., Chen, J., Bao, C., Ma, K.: Be your own teacher: improve the performance of convolutional neural networks via self distillation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3713\u20133722 (2019)","DOI":"10.1109\/ICCV.2019.00381"},{"key":"1021_CR14","doi-asserted-by":"crossref","unstructured":"You, S., Xu, C., Xu, C., Tao, D.: Learning from multiple teacher networks. In: Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1285\u20131294 (2017)","DOI":"10.1145\/3097983.3098135"},{"key":"1021_CR15","doi-asserted-by":"crossref","unstructured":"You, S., Xu, C., Xu, C., Tao, D.: Learning with single-teacher multi-student. In: Thirty-Second AAAI Conference on Artificial Intelligence (2018)","DOI":"10.1609\/aaai.v32i1.11636"},{"key":"1021_CR16","unstructured":"Tian, Y., Krishnan, D., Isola, P.: Contrastive representation distillation. In: International Conference on Learning Representations (2019)"},{"key":"1021_CR17","unstructured":"Wang, X., Zhang, R., Sun, Y., Qi, J.: Kdgan: Knowledge distillation with generative adversarial networks. Adv. Neural Inf. Process. Syst. 
31 (2018)"},{"key":"1021_CR18","first-page":"22243","volume":"33","author":"T Chen","year":"2020","unstructured":"Chen, T., Kornblith, S., Swersky, K., Norouzi, M., Hinton, G.E.: Big self-supervised models are strong semi-supervised learners. Adv. Neural. Inf. Process. Syst. 33, 22243\u201322255 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"1021_CR19","doi-asserted-by":"crossref","unstructured":"Guo, Q., Wang, X., Wu, Y., Yu, Z., Liang, D., Hu, X., Luo, P.: Online knowledge distillation via collaborative learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11020\u201311029 (2020)","DOI":"10.1109\/CVPR42600.2020.01103"},{"key":"1021_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.ins.2021.10.043","volume":"583","author":"C Tan","year":"2022","unstructured":"Tan, C., Liu, J.: Online knowledge distillation with elastic peer. Inf. Sci. 583, 1\u201313 (2022)","journal-title":"Inf. Sci."},{"key":"1021_CR21","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., RoyChowdhury, A., Maji, S.: Bilinear cnn models for fine-grained visual recognition. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1449\u20131457 (2015)","DOI":"10.1109\/ICCV.2015.170"},{"key":"1021_CR22","unstructured":"Krizhevsky, A.: Learning multiple layers of features from tiny images. Master\u2019s thesis, University of Toronto (2009)"},{"issue":"3","key":"1021_CR23","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. 
Vis."},{"key":"1021_CR24","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1021_CR25","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. Comput. Sci. (2014)"},{"key":"1021_CR26","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Laurens, V., Weinberger, K.Q.: Densely connected convolutional networks. IEEE Comput. Soc. (2016)","DOI":"10.1109\/CVPR.2017.243"},{"key":"1021_CR27","doi-asserted-by":"crossref","unstructured":"Zagoruyko, S., Komodakis, N.: Wide residual networks. In: British Machine Vision Conference 2016 (2016). British Machine Vision Association","DOI":"10.5244\/C.30.87"},{"key":"1021_CR28","doi-asserted-by":"crossref","unstructured":"Phuong, M., Lampert, C.H.: Distillation-based training for multi-exit architectures. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1355\u20131364 (2019)","DOI":"10.1109\/ICCV.2019.00144"},{"key":"1021_CR29","doi-asserted-by":"crossref","unstructured":"Sun, D., Yao, A., Zhou, A., Zhao, H.: Deeply-supervised knowledge synergy. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6997\u20137006 (2019)","DOI":"10.1109\/CVPR.2019.00716"},{"key":"1021_CR30","doi-asserted-by":"crossref","unstructured":"Yao, A., Sun, D.: Knowledge transfer via dense cross-layer mutual-distillation. In: European Conference on Computer Vision, pp. 294\u2013311 (2020). 
Springer","DOI":"10.1007\/978-3-030-58555-6_18"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-022-01021-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-022-01021-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-022-01021-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,27]],"date-time":"2023-02-27T19:12:06Z","timestamp":1677525126000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-022-01021-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,11,10]]},"references-count":30,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2023,4]]}},"alternative-id":["1021"],"URL":"https:\/\/doi.org\/10.1007\/s00530-022-01021-6","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,11,10]]},"assertion":[{"value":"5 June 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 October 2022","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 November 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that there is no conflict of interest in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
interest"}}]}}