{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,2]],"date-time":"2026-05-02T16:01:32Z","timestamp":1777737692180,"version":"3.51.4"},"reference-count":28,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"2","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Inf. &amp; Syst."],"published-print":{"date-parts":[[2024,2,1]]},"DOI":"10.1587\/transinf.2023edl8058","type":"journal-article","created":{"date-parts":[[2024,1,31]],"date-time":"2024-01-31T22:14:52Z","timestamp":1706739292000},"page":"239-243","source":"Crossref","is-referenced-by-count":9,"title":["Dynamic Attentive Convolution for Facial Beauty Prediction"],"prefix":"10.1587","volume":"E107.D","author":[{"given":"Zhishu","family":"SUN","sequence":"first","affiliation":[{"name":"College of Computer and Data Science, Fuzhou University"}]},{"given":"Zilong","family":"XIAO","sequence":"additional","affiliation":[{"name":"College of Computer and Data Science, Fuzhou University"}]},{"given":"Yuanlong","family":"YU","sequence":"additional","affiliation":[{"name":"College of Computer and Data Science, Fuzhou University"}]},{"given":"Luojun","family":"LIN","sequence":"additional","affiliation":[{"name":"College of Computer and Data Science, Fuzhou University"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"crossref","unstructured":"[1] D. Zhang, F. Chen, and Y. Xu, Computer Models for Facial Beauty Analysis, Springer, 2016.","DOI":"10.1007\/978-3-319-32598-9"},{"key":"2","doi-asserted-by":"crossref","unstructured":"[2] L. Liang, L. Lin, L. Jin, D. Xie, and M. Li, \u201cScut-fbp5500: A diverse benchmark dataset for multi-paradigm facial beauty prediction,\u201d ICPR, pp.1598-1603, IEEE, 2018. 10.1109\/icpr.2018.8546038","DOI":"10.1109\/ICPR.2018.8546038"},{"key":"3","doi-asserted-by":"crossref","unstructured":"[3] L. Lin, L. Liang, and L. Jin, \u201cR 2-resnext: A resnext-based regression model with relative ranking for facial beauty prediction,\u201d ICPR, pp.85-90, IEEE, 2018. 10.1109\/icpr.2018.8545164","DOI":"10.1109\/ICPR.2018.8545164"},{"key":"4","doi-asserted-by":"crossref","unstructured":"[4] L. Lin, L. Liang, L. Jin, and W. Chen, \u201cAttribute-aware convolutional neural networks for facial beauty prediction.,\u201d IJCAI, pp.847-853, 2019. 10.24963\/ijcai.2019\/119","DOI":"10.24963\/ijcai.2019\/119"},{"key":"5","doi-asserted-by":"publisher","unstructured":"[5] L. Liu, J. Xing, S. Liu, H. Xu, X. Zhou, and S. Yan, \u201cWow! you are so beautiful today!,\u201d TOMM, vol.11, no.1s, pp.1-22, 2014. 10.1145\/2659234","DOI":"10.1145\/2659234"},{"key":"6","doi-asserted-by":"crossref","unstructured":"[6] L. Liang and L. Jin, \u201cFacial skin beautification using region-aware mask,\u201d SMC, pp.2922-2926, IEEE, 2013. 10.1109\/smc.2013.498","DOI":"10.1109\/SMC.2013.498"},{"key":"7","doi-asserted-by":"publisher","unstructured":"[7] T. Alashkar, S. Jiang, S. Wang, and Y. Fu, \u201cExamples-rules guided deep neural network for makeup recommendation,\u201d Proceedings of the AAAI conference on artificial intelligence, vol.31, no.1, pp.941-947, 2017. 10.1609\/aaai.v31i1.10626","DOI":"10.1609\/aaai.v31i1.10626"},{"key":"8","doi-asserted-by":"crossref","unstructured":"[8] J. Li, C. Xiong, L. Liu, X. Shu, and S. Yan, \u201cDeep face beautification,\u201d ACM MM, pp.793-794, 2015. 10.1145\/2733373.2807966","DOI":"10.1145\/2733373.2807966"},{"key":"9","doi-asserted-by":"crossref","unstructured":"[9] P. Aarabi, D. Hughes, K. Mohajer, and M. Emami, \u201cThe automatic measurement of facial beauty,\u201d SMC, pp.2644-2647, IEEE, 2001. 10.1109\/icsmc.2001.972963","DOI":"10.1109\/ICSMC.2001.972963"},{"key":"10","doi-asserted-by":"publisher","unstructured":"[10] D. Zhang, Q. Zhao, and F. Chen, \u201cQuantitative analysis of human facial beauty using geometric features,\u201d Pattern Recognition, vol.44, no.4, pp.940-950, 2011. 10.1016\/j.patcog.2010.10.013","DOI":"10.1016\/j.patcog.2010.10.013"},{"key":"11","doi-asserted-by":"crossref","unstructured":"[11] F. Chen and D. Zhang, \u201cEvaluation of the putative ratio rules for facial beauty indexing,\u201d ICMB, pp.181-188, IEEE, 2014. 10.1109\/icmb.2014.38","DOI":"10.1109\/ICMB.2014.38"},{"key":"12","doi-asserted-by":"crossref","unstructured":"[12] Y. Ren and X. Geng, \u201cSense beauty by label distribution learning,\u201d IJCAI, pp.2648-2654, 2017. 10.24963\/ijcai.2017\/369","DOI":"10.24963\/ijcai.2017\/369"},{"key":"13","unstructured":"[13] A. Krizhevsky, I. Sutskever, and G.E. Hinton, \u201cImagenet classification with deep convolutional neural networks,\u201d Advances in neural information processing systems, vol.25, 2012."},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] J. Long, E. Shelhamer, and T. Darrell, \u201cFully convolutional networks for semantic segmentation,\u201d CVPR, pp.3431-3440, 2015. 10.1109\/cvpr.2015.7298965","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"15","unstructured":"[15] S. Ren, K. He, R. Girshick, and J. Sun, \u201cFaster r-cnn: Towards real-time object detection with region proposal networks,\u201d NIPS, vol.28, pp.91-99, 2015."},{"key":"16","doi-asserted-by":"crossref","unstructured":"[16] A. Toshev and C. Szegedy, \u201cDeeppose: Human pose estimation via deep neural networks,\u201d Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp.1653-1660, 2014.","DOI":"10.1109\/CVPR.2014.214"},{"key":"17","doi-asserted-by":"crossref","unstructured":"[17] J. Hu, L. Shen, and G. Sun, \u201cSqueeze-and-excitation networks,\u201d CVPR, pp.7132-7141, 2018. 10.1109\/cvpr.2018.00745","DOI":"10.1109\/CVPR.2018.00745"},{"key":"18","doi-asserted-by":"crossref","unstructured":"[18] K. He, X. Zhang, S. Ren, and J. Sun, \u201cDeep residual learning for image recognition,\u201d CVPR, pp.770-778, 2016. 10.1109\/cvpr.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"19","doi-asserted-by":"crossref","unstructured":"[19] R.R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra, \u201cGrad-cam: Visual explanations from deep networks via gradient-based localization,\u201d ICCV, pp.618-626, 2017. 10.1109\/iccv.2017.74","DOI":"10.1109\/ICCV.2017.74"},{"key":"20","doi-asserted-by":"publisher","unstructured":"[20] R. Thornhill and S.W. Gangestad, \u201cFacial attractiveness,\u201d Trends Cogn. Sci., vol.3, no.12, pp.452-460, 1999. 10.1016\/s1364-6613(99)01403-5","DOI":"10.1016\/S1364-6613(99)01403-5"},{"key":"21","doi-asserted-by":"crossref","unstructured":"[21] D.I. Perrett, K.J. Lee, I. Penton-Voak, D. Rowland, S. Yoshikawa, D.M. Burt, S.P. Henzi, D.L. Castles, and S. Akamatsu, \u201cEffects of sexual dimorphism on facial attractiveness,\u201d Nature, vol.394, no.6696, pp.884-887, 1998. 10.1038\/29772","DOI":"10.1038\/29772"},{"key":"22","doi-asserted-by":"crossref","unstructured":"[22] Y. Chen, X. Dai, M. Liu, D. Chen, L. Yuan, and Z. Liu, \u201cDynamic convolution: Attention over convolution kernels,\u201d CVPR, pp.11027-11036, 2020. 10.1109\/cvpr42600.2020.01104","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"23","doi-asserted-by":"crossref","unstructured":"[23] D. Xie, L. Liang, L. Jin, J. Xu, and M. Li, \u201cScut-fbp: A benchmark dataset for facial beauty perception,\u201d SMC, pp.1821-1826, 2015. 10.1109\/smc.2015.319","DOI":"10.1109\/SMC.2015.319"},{"key":"24","doi-asserted-by":"crossref","unstructured":"[24] X. Ding, Y. Guo, G. Ding, and J. Han, \u201cAcnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks,\u201d ICCV, pp.1911-1920, 2019. 10.1109\/iccv.2019.00200","DOI":"10.1109\/ICCV.2019.00200"},{"key":"25","doi-asserted-by":"crossref","unstructured":"[25] J. Xu, L. Jin, L. Liang, Z. Feng, D. Xie, and H. Mao, \u201cFacial attractiveness prediction using psychologically inspired convolutional neural network (pi-cnn),\u201d ICASSP, pp.1657-1661, IEEE, 2017. 10.1109\/icassp.2017.7952438","DOI":"10.1109\/ICASSP.2017.7952438"},{"key":"26","unstructured":"[26] L. Lin, L. Liang, and L. Jin, \u201cRegression guided by relative ranking using convolutional neural network (r3cnn) for facial beauty prediction,\u201d IEEE Trans. Affect. Comput., 2019."},{"key":"27","unstructured":"[27] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, et al., \u201cAn image is worth 16x16 words: Transformers for image recognition at scale,\u201d ICLR, pp.1-21, 2020."},{"key":"28","doi-asserted-by":"publisher","unstructured":"[28] K. Cao, K.-n. Choi, H. Jung, and L. Duan, \u201cDeep learning for facial beauty prediction,\u201d Information, vol.11, no.8, p.391, 2020. 10.3390\/info11080391","DOI":"10.3390\/info11080391"}],"container-title":["IEICE Transactions on Information and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/2\/E107.D_2023EDL8058\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T18:59:24Z","timestamp":1731178764000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/2\/E107.D_2023EDL8058\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2,1]]},"references-count":28,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2024]]}},"URL":"https:\/\/doi.org\/10.1587\/transinf.2023edl8058","relation":{},"ISSN":["0916-8532","1745-1361"],"issn-type":[{"value":"0916-8532","type":"print"},{"value":"1745-1361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,2,1]]},"article-number":"2023EDL8058"}}