{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,3,26]],"date-time":"2024-03-26T01:23:32Z","timestamp":1711416212025},"reference-count":30,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2024,2,8]],"date-time":"2024-02-08T00:00:00Z","timestamp":1707350400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,2,8]],"date-time":"2024-02-08T00:00:00Z","timestamp":1707350400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1007\/s11760-024-02999-4","type":"journal-article","created":{"date-parts":[[2024,2,8]],"date-time":"2024-02-08T18:02:16Z","timestamp":1707415336000},"page":"3361-3372","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Multi-task aided face recognition network with convolution kernel spatial collaboration"],"prefix":"10.1007","volume":"18","author":[{"given":"Chunman","family":"Yan","sequence":"first","affiliation":[]},{"given":"Zhen","family":"Zheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,2,8]]},"reference":[{"key":"2999_CR1","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S. et al.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778. Las Vegas (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"2999_CR2","unstructured":"Xiangyu, Z., Xinyu, Z., Mengxia, O.L., et al.: ShuffleNet: an extremely efficient convolutional neural network for mobile devices. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6848\u20136856. New York: IEEE (2018)"},{"issue":"3","key":"2999_CR3","doi-asserted-by":"publisher","first-page":"146","DOI":"10.12677\/JISP.2020.93018","volume":"9","author":"Y Guo","year":"2020","unstructured":"Guo, Y., Abudiriyimu, A., Yadik, N., et al.: Multi-national face classification and recognition based on MobileNet network. J. Image Signal Process. 9(3), 146\u2013155 (2020)","journal-title":"J. Image Signal Process."},{"key":"2999_CR4","doi-asserted-by":"crossref","unstructured":"Han, K., Wang, Y., Tian, Q. et al.: Ghostnet: More features from cheap operations. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1580\u20131589, Seattle (2020)","DOI":"10.1109\/CVPR42600.2020.00165"},{"key":"2999_CR5","doi-asserted-by":"crossref","unstructured":"Chen, S., Liu, Y., Gao, X. et al.: Mobilefacenets: efficient CNNS for accurate real-time face verification on mobile devices. In: Chinese Conference on Biometric Recognition, pp. 428\u2013438, Cham (2018)","DOI":"10.1007\/978-3-319-97909-0_46"},{"key":"2999_CR6","doi-asserted-by":"crossref","unstructured":"Martindez-Diaz, Y., Luevano, L.S., Mendez-Vazquez, H. et al.: Shufflefacenet: a lightweight face architecture for efficient and highly-accurate face recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops, Seoul (2019)","DOI":"10.1109\/ICCVW.2019.00333"},{"key":"2999_CR7","doi-asserted-by":"crossref","unstructured":"Cao, Q., Shen, L., Xie, W. et al.: Vggface2: a dataset for recognising faces across pose and age. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition(FG 2018), pp. 67\u201374, Xi'an (2018)","DOI":"10.1109\/FG.2018.00020"},{"key":"2999_CR8","doi-asserted-by":"crossref","unstructured":"Yan, M., Zhao, M., Xu, Z. 
et al.: Vargfacenet: an efficient variable group convolutional neural network for lightweight face recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops, Seoul (2019)","DOI":"10.1109\/ICCVW.2019.00323"},{"key":"2999_CR9","doi-asserted-by":"publisher","first-page":"1640","DOI":"10.1109\/TIFS.2019.2946938","volume":"15","author":"Q Wang","year":"2020","unstructured":"Wang, Q., Guo, G.: LS-CNN: characterizing local patches at multiple scales for face recognition. IEEE Trans. Inf. Forensics Secur.15, 1640\u20131653 (2020)","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"issue":"6","key":"2999_CR10","doi-asserted-by":"publisher","first-page":"6","DOI":"10.1016\/j.aej.2021.09.043","volume":"61","author":"M Tamilselvi","year":"2022","unstructured":"Tamilselvi, M., Karthikeyan, S.: An ingenious face recognition system based on HRPSM_CNN under unrestrained environmental condition. Alex. Eng. J. 61(6), 6 (2022)","journal-title":"Alex. Eng. J."},{"issue":"11","key":"2999_CR11","first-page":"5","volume":"13","author":"L He","year":"2023","unstructured":"He, L., He, L.L., Peng, L.J.: CFormerFaceNet: efficient lightweight network merging a CNN and transformer for face recognition. Appl. Sci. Basel 13(11), 5 (2023)","journal-title":"Appl. Sci. Basel"},{"issue":"9","key":"2999_CR12","first-page":"9","volume":"8","author":"GP Nam","year":"2018","unstructured":"Nam, G.P., Choi, H., et al.: PSI-CNN: a pyramid-based scale-invariant CNN architecture for face recognition robust to various image resolutions. Appl. Sci. Basel 8(9), 9 (2018)","journal-title":"Appl. Sci. Basel"},{"issue":"06","key":"2999_CR13","doi-asserted-by":"publisher","first-page":"2250107","DOI":"10.1142\/S0218126622501079","volume":"31","author":"YX Long","year":"2022","unstructured":"Long, Y.X.: A face recognition algorithm based on intermediate layers connected by the CNN. J. Circuits Syst. Comput. 31(06), 2250107 (2022)","journal-title":"J. Circuits Syst. 
Comput."},{"issue":"14","key":"2999_CR14","doi-asserted-by":"publisher","first-page":"6","DOI":"10.1007\/s11042-021-10537-4","volume":"81","author":"ZH Xie","year":"2022","unstructured":"Xie, Z.H., Niu, J.Y., et al.: Regularization and attention feature distillation base on light CNN for hyperspectral face recognition. Multimed. Tools Appl. 81(14), 6 (2022)","journal-title":"Multimed. Tools Appl."},{"key":"2999_CR15","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, Y.T., Zhou, Z., Ji, X. et al.: CosFace: large margin cosine loss for deep face recognition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 5265\u20135274. Salt Lake City, USA (2018)","DOI":"10.1109\/CVPR.2018.00552"},{"key":"2999_CR16","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Zafeiriou, S.: ArcFace: additive angular margin loss for deep face recognition. In: Proceedings of 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4685\u20134694. Long Beach, USA: IEEE (2019)","DOI":"10.1109\/CVPR.2019.00482"},{"key":"2999_CR17","doi-asserted-by":"crossref","unstructured":"Zhao R.Z. et al.: Convolution of convolution: let kernels spatially collaborate. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 651\u2013660. Vancouver Canada (2023)","DOI":"10.1109\/CVPR52688.2022.00073"},{"key":"2999_CR18","doi-asserted-by":"publisher","DOI":"10.1142\/S0219691323500248","author":"A Ghali","year":"2023","unstructured":"Ghali, A., Chouraqui, S., et al.: Few-shot learning CNN optimized using combined 2D-DWT injection and evolutionary optimization algorithms for human face recognition. Int. J. Wavelets Multiresolut. Inf. Process. (2023). https:\/\/doi.org\/10.1142\/S0219691323500248","journal-title":"Int. J. Wavelets Multiresolut. Inf. 
Process."},{"issue":"26","key":"2999_CR19","doi-asserted-by":"publisher","first-page":"9","DOI":"10.1007\/s00521-023-08732-5","volume":"35","author":"A Khalifa","year":"2023","unstructured":"Khalifa, A., Al-Hamadi, A.: JAMsFace: joint adaptive margins loss for deep face recognition. Neural Comput. Appl. 35(26), 9 (2023)","journal-title":"Neural Comput. Appl."},{"key":"2999_CR20","first-page":"9","volume":"18","author":"PF Ardekani","year":"2023","unstructured":"Ardekani, P.F., Tale, S.Z., Parseh, M.J.: Face mask recognition using a custom CNN and data augmentation. Signal Image Video Process. 18, 9 (2023)","journal-title":"Signal Image Video Process."},{"key":"2999_CR21","doi-asserted-by":"crossref","unstructured":"Pasquale, F., Antonio, G., Alessia, S., Mario, V.: Multi-task learning on the edge for effective gender, age, ethnicity and emotion recognition. Eng. Appl. Artif. Intell. 118 (2022)","DOI":"10.1016\/j.engappai.2022.105651"},{"key":"2999_CR22","doi-asserted-by":"publisher","first-page":"108401","DOI":"10.1016\/j.patcog.2021.108401","volume":"123","author":"WM Yu","year":"2022","unstructured":"Yu, W.M., Xu, H.: Co-attentive multi-task convolutional neural network for facial expression recognition. Pattern Recognit. 123, 108401 (2022)","journal-title":"Pattern Recognit."},{"issue":"2","key":"2999_CR23","doi-asserted-by":"publisher","first-page":"818","DOI":"10.1109\/TAFFC.2020.2969189","volume":"13","author":"LB Mao","year":"2022","unstructured":"Mao, L.B., Yan, Y., Xue, J.H., Wang, H.Z.: Deep multi-task multi-label CNN for effective facial attribute classification. IEEE Trans. Affect. Comput.13(2), 818\u2013828 (2022)","journal-title":"IEEE Trans. Affect. 
Comput."},{"issue":"1","key":"2999_CR24","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1109\/TPAMI.2017.2781233","volume":"41","author":"R Ranjan","year":"2018","unstructured":"Ranjan, R., Patel, V.M., Chellappa, R.: HyperFace: a deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE Trans. Pattern Anal. Mach. Intell. 41(1), 121\u2013135 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"10","key":"2999_CR25","doi-asserted-by":"publisher","first-page":"2178","DOI":"10.1587\/transinf.2020EDP7059","volume":"E103D","author":"YF Liu","year":"2020","unstructured":"Liu, Y.F., Chen, J.H., Qiu, Y.: Joint multi-patch and multi-task CNNs for robust face recognition. IEICE Trans. Inf. Syst. E103D(10), 2178\u20132187 (2020)","journal-title":"IEICE Trans. Inf. Syst."},{"issue":"12","key":"2999_CR26","doi-asserted-by":"publisher","first-page":"1615","DOI":"10.1109\/TPAMI.2003.1251154","volume":"25","author":"T Sim","year":"2003","unstructured":"Sim, T., Baker, S., Bsat, M.: The CMU pose, illumination, and expression database. IEEE Trans. Pattern Anal. Mach. Intell. 25(12), 1615\u20131618 (2003)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"1","key":"2999_CR27","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1109\/TSMCA.2007.909557","volume":"38","author":"W Gao","year":"2008","unstructured":"Gao, W., Cao, B., Shan, S.G., et al.: The CAS-PEAL large-scale Chinese face database and baseline evaluations. IEEE Trans. Syst. Man Cybern.38(1), 149\u2013161 (2008)","journal-title":"IEEE Trans. Syst. Man Cybern."},{"key":"2999_CR28","doi-asserted-by":"crossref","unstructured":"Howard, A., Sandler, M., Chu, G., Chen, L.C. et al.: Searching for MobileNetV3. 
In: International Conference on Computer Vision, Seoul, Korea, (2019)","DOI":"10.1109\/ICCV.2019.00140"},{"key":"2999_CR29","doi-asserted-by":"crossref","unstructured":"Ding, X.H., Zhang, X.Y., Ma, N.N. et al.: RepVGG: making VGG-style convnets great again. In: IEEE Conference on Computer Vision and Pattern Recognition. Online, (2021)","DOI":"10.1109\/CVPR46437.2021.01352"},{"key":"2999_CR30","unstructured":"Tang, Y.H., Han, K., Guo, J.Y., Xu, C., Xu, G., Wang, Y.H.: GhostNetV2: enhance cheap operation with long-range attention. arXiv:2211.12905 (2022)"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-02999-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-024-02999-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-02999-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,25]],"date-time":"2024-03-25T16:07:58Z","timestamp":1711382878000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-024-02999-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2,8]]},"references-count":30,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024,6]]}},"alternative-id":["2999"],"URL":"https:\/\/doi.org\/10.1007\/s11760-024-02999-4","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,2,8]]},"assertion":[{"value":"2 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"19 December 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 January 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 February 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article is not a human\/animal study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}]}}