{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T22:08:23Z","timestamp":1742940503923,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":26,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819984688"},{"type":"electronic","value":"9789819984695"}],"license":[{"start":{"date-parts":[[2023,12,25]],"date-time":"2023-12-25T00:00:00Z","timestamp":1703462400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,25]],"date-time":"2023-12-25T00:00:00Z","timestamp":1703462400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8469-5_21","type":"book-chapter","created":{"date-parts":[[2023,12,24]],"date-time":"2023-12-24T17:02:18Z","timestamp":1703437338000},"page":"265-277","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["AU-Oriented Expression Decomposition Learning for\u00a0Facial Expression Recognition"],"prefix":"10.1007","author":[{"given":"Zehao","family":"Lin","sequence":"first","affiliation":[]},{"given":"Jiahui","family":"She","sequence":"additional","affiliation":[]},{"given":"Qiu","family":"Shen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,25]]},"reference":[{"key":"21_CR1","doi-asserted-by":"crossref","unstructured":"Barsoum, E., Zhang, C., Ferrer, C.C., Zhang, Z.: Training deep networks for facial expression recognition with crowd-sourced label distribution. In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 279\u2013283 (2016)","DOI":"10.1145\/2993148.2993165"},{"key":"21_CR2","doi-asserted-by":"crossref","unstructured":"Chen, D., Mei, J.P., Wang, C., Feng, Y., Chen, C.: Online knowledge distillation with diverse peers. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 3430\u20133437 (2020)","DOI":"10.1609\/aaai.v34i04.5746"},{"key":"21_CR3","first-page":"14338","volume":"33","author":"Z Cui","year":"2020","unstructured":"Cui, Z., Song, T., Wang, Y., Ji, Q.: Knowledge augmented deep neural networks for joint facial expression and action unit recognition. Adv. Neural. Inf. Process. Syst. 33, 14338\u201314349 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"Ekman, P., Friesen, W.V.: Facial action coding system. Environ. Psychol. Nonverbal Behav. (1978)","DOI":"10.1037\/t27734-000"},{"key":"21_CR5","doi-asserted-by":"crossref","unstructured":"Farzaneh, A.H., Qi, X.: Facial expression recognition in the wild via deep attentive center loss. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2402\u20132411 (2021)","DOI":"10.1109\/WACV48630.2021.00245"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Gu, Y., Yan, H., Zhang, X., Wang, Y., Ji, Y., Ren, F.: Towards facial expression recognition in the wild via noise-tolerant network. IEEE Trans. Circ. Syst. Video Technol. (2022)","DOI":"10.1109\/TCSVT.2022.3220669"},{"key":"21_CR7","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"issue":"4","key":"21_CR8","doi-asserted-by":"publisher","first-page":"1868","DOI":"10.1109\/TAFFC.2022.3197761","volume":"13","author":"J Jiang","year":"2022","unstructured":"Jiang, J., Deng, W.: Disentangling identity and pose for facial expression recognition. IEEE Trans. Affect. Comput. 13(4), 1868\u20131878 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"21_CR9","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. Comput. Sci. (2014)"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Li, S., Deng, W., Du, J.: Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2584\u20132593. IEEE (2017)","DOI":"10.1109\/CVPR.2017.277"},{"issue":"1","key":"21_CR11","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1007\/s00530-022-00986-8","volume":"29","author":"Z Lin","year":"2023","unstructured":"Lin, Z., She, J., Shen, Q.: Real emotion seeker: recalibrating annotation for facial expression recognition. Multimedia Syst. 29(1), 139\u2013151 (2023)","journal-title":"Multimedia Syst."},{"key":"21_CR12","doi-asserted-by":"crossref","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended Cohn-Kanade dataset (CK+): a complete dataset for action unit and emotion-specified expression. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops, pp. 94\u2013101. IEEE (2010)","DOI":"10.1109\/CVPRW.2010.5543262"},{"issue":"1","key":"21_CR13","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini, A., Hasani, B., Mahoor, M.H.: AffectNet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans. Affect. Comput. 10(1), 18\u201331 (2017)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Pu, T., Chen, T., Xie, Y., Wu, H., Lin, L.: Au-expression knowledge constrained representation learning for facial expression recognition. In: 2021 IEEE International Conference on Robotics and Automation (ICRA), pp. 11154\u201311161. IEEE (2021)","DOI":"10.1109\/ICRA48506.2021.9561252"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Ruan, D., Yan, Y., Lai, S., Chai, Z., Shen, C., Wang, H.: Feature decomposition and reconstruction learning for effective facial expression recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7660\u20137669 (2021)","DOI":"10.1109\/CVPR46437.2021.00757"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"She, J., Hu, Y., Shi, H., Wang, J., Shen, Q., Mei, T.: Dive into ambiguity: latent distribution mining and pairwise uncertainty estimation for facial expression recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6248\u20136257 (2021)","DOI":"10.1109\/CVPR46437.2021.00618"},{"key":"21_CR17","unstructured":"Valstar, M., Pantic, M.: Induced disgust, happiness and surprise: an addition to the mmi facial expression database. In: Proceedings of the 3rd International Workshop on EMOTION (satellite of LREC): Corpora for Research on Emotion and Affect, p. 65. Paris, France (2010)"},{"key":"21_CR18","doi-asserted-by":"crossref","unstructured":"Wang, K., Peng, X., Yang, J., Lu, S., Qiao, Y.: Suppressing uncertainties for large-scale facial expression recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6897\u20136906 (2020)","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Xue, F., Wang, Q., Guo, G.: Transfer: learning relation-aware facial expression representations with transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3601\u20133610 (2021)","DOI":"10.1109\/ICCV48922.2021.00358"},{"key":"21_CR20","doi-asserted-by":"publisher","first-page":"103264","DOI":"10.1109\/ACCESS.2022.3210109","volume":"10","author":"J Yang","year":"2022","unstructured":"Yang, J., Lv, Z., Kuang, K., Yang, S., Xiao, L., Tang, Q.: RASN: using attention and sharing affinity features to address sample imbalance in facial expression recognition. IEEE Access 10, 103264\u2013103274 (2022)","journal-title":"IEEE Access"},{"key":"21_CR21","doi-asserted-by":"publisher","unstructured":"Zeng, D., Lin, Z., Yan, X., Liu, Y., Wang, F., Tang, B.: Face2Exp: combating data biases for facial expression recognition. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 20259\u201320268 (2022). https:\/\/doi.org\/10.1109\/CVPR52688.2022.01965","DOI":"10.1109\/CVPR52688.2022.01965"},{"issue":"10","key":"21_CR22","doi-asserted-by":"publisher","first-page":"692","DOI":"10.1016\/j.imavis.2014.06.002","volume":"32","author":"X Zhang","year":"2014","unstructured":"Zhang, X., et al.: BP4D-spontaneous: a high-resolution spontaneous 3D dynamic facial expression database. Image Vis. Comput. 32(10), 692\u2013706 (2014)","journal-title":"Image Vis. Comput."},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Dong, W., Hu, B.G., Ji, Q.: Classifier learning with prior probabilities for facial action unit recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5108\u20135116 (2018)","DOI":"10.1109\/CVPR.2018.00536"},{"issue":"9","key":"21_CR24","doi-asserted-by":"publisher","first-page":"607","DOI":"10.1016\/j.imavis.2011.07.002","volume":"29","author":"G Zhao","year":"2011","unstructured":"Zhao, G., Huang, X., Taini, M., Li, S.Z., Pietik\u00e4Inen, M.: Facial expression recognition from near-infrared videos. Image Vis. Comput. 29(9), 607\u2013619 (2011)","journal-title":"Image Vis. Comput."},{"key":"21_CR25","doi-asserted-by":"crossref","unstructured":"Zhao, K., Chu, W.S., De la Torre, F., Cohn, J.F., Zhang, H.: Joint patch and multi-label learning for facial action unit detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2207\u20132216 (2015)","DOI":"10.1109\/CVPR.2015.7298833"},{"key":"21_CR26","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao, Z., Liu, Q., Wang, S.: Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Trans. Image Process. 30, 6544\u20136556 (2021)","journal-title":"IEEE Trans. Image Process."}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8469-5_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,24]],"date-time":"2023-12-24T17:05:14Z","timestamp":1703437514000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8469-5_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,25]]},"ISBN":["9789819984688","9789819984695"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8469-5_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023,12,25]]},"assertion":[{"value":"25 December 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Xiamen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/prcv2023.xmu.edu.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1420","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"532","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"37% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3,78","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3,69","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}