{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T06:22:42Z","timestamp":1768976562655,"version":"3.49.0"},"publisher-location":"Cham","reference-count":38,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031442032","type":"print"},{"value":"9783031442049","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44204-9_19","type":"book-chapter","created":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T04:02:11Z","timestamp":1695268931000},"page":"223-235","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["FBPFormer: Dynamic Convolutional Transformer for\u00a0Global-Local-Contexual Facial Beauty Prediction"],"prefix":"10.1007","author":[{"given":"Qipeng","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1141-2487","authenticated-orcid":false,"given":"Luojun","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Zhifeng","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Yuanlong","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,22]]},"reference":[{"key":"19_CR1","doi-asserted-by":"crossref","unstructured":"Alashkar, T., Jiang, S., Wang, S., Fu, Y.: Examples-rules guided deep neural network for makeup recommendation. In: AAAI, pp. 941\u2013947 (2017)","DOI":"10.1609\/aaai.v31i1.10626"},{"issue":"8","key":"19_CR2","doi-asserted-by":"publisher","first-page":"391","DOI":"10.3390\/info11080391","volume":"11","author":"K Cao","year":"2020","unstructured":"Cao, K., Choi, K.N., Jung, H., Duan, L.: Deep learning for facial beauty prediction. Information 11(8), 391 (2020)","journal-title":"Information"},{"key":"19_CR3","doi-asserted-by":"crossref","unstructured":"Chen, Y., Mao, H., Jin, L.: A novel method for evaluating facial attractiveness. In: 2010 International Conference on Audio, Language and Image Processing, pp. 1382\u20131386. IEEE (2010)","DOI":"10.1109\/ICALIP.2010.5685007"},{"issue":"3","key":"19_CR4","doi-asserted-by":"publisher","first-page":"433","DOI":"10.1037\/h0031591","volume":"5","author":"JF Cross","year":"1971","unstructured":"Cross, J.F., Cross, J.: Age, sex, race, and the perception of facial beauty. Dev. Psychol. 5(3), 433 (1971)","journal-title":"Dev. Psychol."},{"key":"19_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"19_CR6","doi-asserted-by":"crossref","unstructured":"Ding, M., Xiao, B., Codella, N., Luo, P., Wang, J., Yuan, L.: Davit: dual attention vision transformers. 
arXiv preprint arXiv:2204.03645 (2022)","DOI":"10.1007\/978-3-031-20053-3_5"},{"key":"19_CR7","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"19_CR8","doi-asserted-by":"crossref","unstructured":"d\u2019Ascoli, S., Touvron, H., Leavitt, M.L., Morcos, A.S., Biroli, G., Sagun, L.: Convit: improving vision transformers with soft convolutional inductive biases. In: International Conference on Machine Learning, pp. 2286\u20132296. PMLR (2021)","DOI":"10.1088\/1742-5468\/ac9830"},{"issue":"1","key":"19_CR9","doi-asserted-by":"publisher","first-page":"119","DOI":"10.1162\/089976606774841602","volume":"18","author":"Y Eisenthal","year":"2006","unstructured":"Eisenthal, Y., Dror, G., Ruppin, E.: Facial attractiveness: beauty and the machine. Neural Comput. 18(1), 119\u2013142 (2006)","journal-title":"Neural Comput."},{"issue":"6","key":"19_CR10","doi-asserted-by":"publisher","first-page":"2326","DOI":"10.1016\/j.patcog.2011.11.024","volume":"45","author":"J Fan","year":"2012","unstructured":"Fan, J., Chau, K., Wan, X., Zhai, L., Lau, E.: Prediction of facial attractiveness from facial proportions. Pattern Recogn. 45(6), 2326\u20132334 (2012)","journal-title":"Pattern Recogn."},{"issue":"6","key":"19_CR11","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1111\/j.1467-2494.2005.00286.x","volume":"27","author":"B Fink","year":"2005","unstructured":"Fink, B., Neave, N.: The biology of facial beauty. Int. J. Cosmet. Sci. 27(6), 317\u2013325 (2005)","journal-title":"Int. J. Cosmet. Sci."},{"key":"19_CR12","unstructured":"Hadji, I., Wildes, R.P.: What do we understand about convolutional networks? arXiv preprint arXiv:1803.08834 (2018)"},{"key":"19_CR13","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"19_CR14","unstructured":"Lei Ba, J., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"19_CR15","unstructured":"Li, Y., Yao, T., Pan, Y., Mei, T.: Contextual transformer networks for visual recognition. arXiv preprint arXiv:2107.12292 (2021)"},{"issue":"12","key":"19_CR16","doi-asserted-by":"publisher","first-page":"2600","DOI":"10.1109\/TCYB.2014.2311033","volume":"44","author":"L Liang","year":"2014","unstructured":"Liang, L., Jin, L., Li, X.: Facial skin beautification using adaptive region-aware masks. IEEE Trans. Cybern. 44(12), 2600\u20132612 (2014)","journal-title":"IEEE Trans. Cybern."},{"key":"19_CR17","doi-asserted-by":"crossref","unstructured":"Liang, L., Lin, L., Jin, L., Xie, D., Li, M.: SCUT-FBP5500: a diverse benchmark dataset for multi-paradigm facial beauty prediction. ICPR (2018)","DOI":"10.1109\/ICPR.2018.8546038"},{"key":"19_CR18","doi-asserted-by":"crossref","unstructured":"Lin, L., Liang, L., Jin, L.: R$$^2$$-ResNeXt: a ResNeXt-based regression model with relative ranking for facial beauty prediction. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 85\u201390. 
IEEE (2018)","DOI":"10.1109\/ICPR.2018.8545164"},{"issue":"1","key":"19_CR19","doi-asserted-by":"publisher","first-page":"122","DOI":"10.1109\/TAFFC.2019.2933523","volume":"13","author":"L Lin","year":"2019","unstructured":"Lin, L., Liang, L., Jin, L.: Regression guided by relative ranking using convolutional neural network (R$$^3$$ CNN) for facial beauty prediction. IEEE Trans. Affect. Comput. 13(1), 122\u2013134 (2019)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"19_CR20","doi-asserted-by":"publisher","first-page":"122","DOI":"10.1109\/TAFFC.2019.2933523","volume":"13","author":"L Lin","year":"2019","unstructured":"Lin, L., Liang, L., Jin, L.: Regression guided by relative ranking using convolutional neural network (R$$^3$$CNN) for facial beauty prediction. IEEE Trans. Affect. Comput. 13, 122\u2013134 (2019)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"19_CR21","doi-asserted-by":"crossref","unstructured":"Lin, L., Liang, L., Jin, L., Chen, W.: Attribute-aware convolutional neural networks for facial beauty prediction. In: IJCAI, pp. 847\u2013853 (2019)","DOI":"10.24963\/ijcai.2019\/119"},{"key":"19_CR22","doi-asserted-by":"crossref","unstructured":"Liu, L., Xing, J., Liu, S., Xu, H., Zhou, X., Yan, S.: Wow! you are so beautiful today! ACM Trans. Multimedia Comput. Commun. Appl. (TOMM) 11(1s), 20 (2014)","DOI":"10.1145\/2659234"},{"key":"19_CR23","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"19_CR24","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976\u201311986 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"19_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"776","DOI":"10.1007\/978-3-030-58555-6_46","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Ma","year":"2020","unstructured":"Ma, N., Zhang, X., Huang, J., Sun, J.: WeightNet: revisiting the design space of weight networks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12360, pp. 776\u2013792. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58555-6_46"},{"key":"19_CR26","unstructured":"Park, N., Kim, S.: How do vision transformers work? In: The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25\u201329, 2022. OpenReview.net (2022). https:\/\/openreview.net\/forum?id=D78Go4hVcxO"},{"issue":"6696","key":"19_CR27","doi-asserted-by":"publisher","first-page":"884","DOI":"10.1038\/29772","volume":"394","author":"DI Perrett","year":"1998","unstructured":"Perrett, D.I., et al.: Effects of sexual dimorphism on facial attractiveness. Nature 394(6696), 884 (1998)","journal-title":"Nature"},{"key":"19_CR28","doi-asserted-by":"crossref","unstructured":"Ren, Y., Geng, X.: Sense beauty by label distribution learning. In: IJCAI, vol. 17, pp. 2648\u20132654 (2017)","DOI":"10.24963\/ijcai.2017\/369"},{"key":"19_CR29","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1146\/annurev.psych.57.102904.190208","volume":"57","author":"G Rhodes","year":"2006","unstructured":"Rhodes, G., et al.: The evolutionary psychology of facial beauty. Annu. 
Rev. Psychol. 57, 199 (2006)","journal-title":"Annu. Rev. Psychol."},{"key":"19_CR30","doi-asserted-by":"crossref","unstructured":"Rothe, R., Timofte, R., Van Gool, L.: Some like it hot-visual guidance for preference prediction. In: CVPR, pp. 5553\u20135561 (2016)","DOI":"10.1109\/CVPR.2016.599"},{"key":"19_CR31","unstructured":"Rubenstein, A.J., Langlois, J.H., Roggman, L.A.: What makes a face attractive and why: the role of averageness in defining facial beauty (2002)"},{"key":"19_CR32","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-cam: visual explanations from deep networks via gradient-based localization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 618\u2013626 (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"19_CR33","doi-asserted-by":"crossref","unstructured":"Xie, D., Liang, L., Jin, L., Xu, J., Li, M.: SCUT-FBP: a benchmark dataset for facial beauty perception. In: IEEE International Conference on Systems, Man, and Cybernetics, pp. 1821\u20131826 (2015)","DOI":"10.1109\/SMC.2015.319"},{"key":"19_CR34","doi-asserted-by":"crossref","unstructured":"Xu, J., Jin, L., Liang, L., Feng, Z., Xie, D., Mao, H.: Facial attractiveness prediction using psychologically inspired convolutional neural network (PI-CNN). In: 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1657\u20131661. IEEE (2017)","DOI":"10.1109\/ICASSP.2017.7952438"},{"key":"19_CR35","unstructured":"Yang, B., Bender, G., Le, Q.V., Ngiam, J.: Condconv: conditionally parameterized convolutions for efficient inference. Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"19_CR36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-32598-9","volume-title":"Computer Models for Facial Beauty Analysis","author":"D Zhang","year":"2016","unstructured":"Zhang, D., Chen, F., Xu, Y.: Computer Models for Facial Beauty Analysis. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-32598-9"},{"key":"19_CR37","unstructured":"Zhang, Y., Zhang, J., Wang, Q., Zhong, Z.: Dynet: dynamic convolution for accelerating convolutional neural networks. arXiv preprint arXiv:2004.10694 (2020)"},{"key":"19_CR38","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. 
arXiv preprint arXiv:2010.04159 (2020)"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44204-9_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T06:26:46Z","timestamp":1695277606000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44204-9_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031442032","9783031442049"],"references-count":38,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44204-9_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"22 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Heraklion","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easyacademia.org","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"947","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"426","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"22","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.4","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"type of other papers accepted  : 9 Abstract","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
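
The record above is a Crossref REST API "work" response for this chapter. A minimal sketch of retrieving and parsing it, assuming the public api.crossref.org endpoint and the Python requests library; the mailto contact address is a placeholder for Crossref's polite-pool convention, not part of the record:

import requests

DOI = "10.1007/978-3-031-44204-9_19"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.org"},  # placeholder contact for the polite pool
    timeout=30,
)
resp.raise_for_status()
# The envelope matches the record above: status / message-type / message.
work = resp.json()["message"]

print(work["title"][0])                       # chapter title
print(work["container-title"])                # LNCS series and proceedings titles
print(work["DOI"], work["references-count"])  # 38 references, as deposited
for ref in work.get("reference", [])[:3]:
    # Each entry carries a "key" plus structured fields and/or an
    # "unstructured" citation string, as in the reference array above.
    print(ref["key"], ref.get("DOI") or ref.get("unstructured", "")[:60])

Note that date fields ("issued", "published-print", "deposited") use Crossref's date-parts arrays, e.g. [[2023]] or [[2023,9,21]], so a parser should not assume all three of year, month, and day are present.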