{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T07:42:09Z","timestamp":1743147729069,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":25,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819981472"},{"type":"electronic","value":"9789819981489"}],"license":[{"start":{"date-parts":[[2023,11,26]],"date-time":"2023-11-26T00:00:00Z","timestamp":1700956800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,26]],"date-time":"2023-11-26T00:00:00Z","timestamp":1700956800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8148-9_8","type":"book-chapter","created":{"date-parts":[[2023,11,25]],"date-time":"2023-11-25T10:02:23Z","timestamp":1700906543000},"page":"92-104","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Landmark-Assisted Facial Action Unit Detection with\u00a0Optimal Attention and\u00a0Contrastive Learning"],"prefix":"10.1007","author":[{"given":"Yi","family":"Yang","sequence":"first","affiliation":[]},{"given":"Qiaoping","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Hongtao","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Fei","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Yaoyi","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,26]]},"reference":[{"key":"8_CR1","first-page":"1086","volume":"2018","author":"JS Chung","year":"2018","unstructured":"Chung, J.S., Nagrani, A., Zisserman, A.: VoxCeleb2: deep speaker recognition. Proc. Interspeech 2018, 1086\u20131090 (2018)","journal-title":"Proc. Interspeech"},{"key":"8_CR2","doi-asserted-by":"crossref","unstructured":"Ekman, P., Friesen, W.V.: Facial action coding system: a technique for the measurement of facial movement. Consulting Psychologists Press (1978)","DOI":"10.1037\/t27734-000"},{"key":"8_CR3","doi-asserted-by":"crossref","unstructured":"Fabian Benitez-Quiroz, C., Srinivasan, R., Martinez, A.M.: EmotioNet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016), pp. 5562\u20135570 (2016)","DOI":"10.1109\/CVPR.2016.600"},{"key":"8_CR4","doi-asserted-by":"crossref","unstructured":"Fu, J., et al.: Dual attention network for scene segmentation. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2019), pp. 3146\u20133154 (2019)","DOI":"10.1109\/CVPR.2019.00326"},{"key":"8_CR5","doi-asserted-by":"crossref","unstructured":"Girard, J.M., Chu, W.S., Jeni, L.A., Cohn, J.F.: Sayette group formation task (GFT) spontaneous facial expression database. In: IEEE International Conference on Automatic Face and Gesture Recognition (FG 2017), pp. 581\u2013588 (2017)","DOI":"10.1109\/FG.2017.144"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Hayale, W., Negi, P., Mahoor, M.: Facial expression recognition using deep Siamese neural networks with a supervised loss function. 
In: IEEE International Conference on Automatic Face and Gesture Recognition (FG 2019), pp.\u00a01\u20137 (2019)","DOI":"10.1109\/FG.2019.8756571"},{"key":"8_CR7","doi-asserted-by":"crossref","unstructured":"Hu, Q., Jiang, F., Mei, C., Shen, R.: CCT: a cross-concat and temporal neural network for multi-label action unit detection. In: IEEE International Conference on Multimedia and Expo (ICME 2018) (2018)","DOI":"10.1109\/ICME.2018.8486516"},{"key":"8_CR8","doi-asserted-by":"publisher","first-page":"1452","DOI":"10.1109\/TAFFC.2020.3006392","volume":"13","author":"Q Hu","year":"2020","unstructured":"Hu, Q., et al.: RFAU: a database for facial action unit analysis in real classrooms. IEEE Trans. Affect. Comput. 13, 1452\u20131465 (2020)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"8_CR9","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3007032","author":"Z Huang","year":"2020","unstructured":"Huang, Z., et al.: CCNet: criss-cross attention for semantic segmentation. IEEE Trans. Pattern Anal. Mach. Intell. (2020). https:\/\/doi.org\/10.1109\/TPAMI.2020.3007032","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"8_CR10","unstructured":"Kollias, D., Zafeiriou, S.: Aff-wild2: extending the Aff-wild database for affect recognition. arXiv preprint arXiv:1811.07770 (2018)"},{"key":"8_CR11","unstructured":"Kollias, D., Zafeiriou, S.: A multi-task learning & generation framework: valence-arousal, action units & primary expressions. arXiv preprint arXiv:1811.07771 (2018)"},{"key":"8_CR12","unstructured":"Kollias, D., Zafeiriou, S.: Expression, affect, action unit recognition: Aff-wild2, multi-task learning and arcface. arXiv preprint arXiv:1910.04855 (2019)"},{"key":"8_CR13","doi-asserted-by":"crossref","unstructured":"Li, W., Abtahi, F., Zhu, Z.: Action unit detection with region adaptation, multi-labeling learning and optimal temporal fusing. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2017) (2017)","DOI":"10.1109\/CVPR.2017.716"},{"key":"8_CR14","doi-asserted-by":"crossref","unstructured":"Li, W., Abtahi, F., Zhu, Z., Yin, L.: EAC-Net: a region-based deep enhancing and cropping approach for facial action unit detection. In: IEEE International Conference on Automatic Face and Gesture Recognition (FG 2017), pp. 103\u2013110 (2017)","DOI":"10.1109\/FG.2017.136"},{"key":"8_CR15","doi-asserted-by":"crossref","unstructured":"Li, Y., Zeng, J., Shan, S., Chen, X.: Self-supervised representation learning from videos for facial action unit detection. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2019) (2019)","DOI":"10.1109\/CVPR.2019.01118"},{"key":"8_CR16","doi-asserted-by":"publisher","first-page":"2616","DOI":"10.21437\/Interspeech.2017-950","volume":"2017","author":"A Nagrani","year":"2017","unstructured":"Nagrani, A., Chung, J.S., Zisserman, A.: VoxCeleb: a large-scale speaker identification dataset. Proc. Interspeech 2017, 2616\u20132620 (2017)","journal-title":"Proc. Interspeech"},{"key":"8_CR17","doi-asserted-by":"publisher","first-page":"103723","DOI":"10.1016\/j.imavis.2018.09.014","volume":"122","author":"A Romero","year":"2018","unstructured":"Romero, A., Le\u00f3n, J., Arbel\u00e1ez, P.: Multi-view dynamic facial action unit detection. Image Vis. Comput. 122, 103723 (2018)","journal-title":"Image Vis. 
Comput."},{"issue":"2","key":"8_CR18","doi-asserted-by":"publisher","first-page":"1111","DOI":"10.1109\/TAFFC.2021.3091331","volume":"13","author":"Z Shao","year":"2022","unstructured":"Shao, Z., Cai, J., Cham, T.J., Lu, X., Ma, L.: Unconstrained facial action unit detection via latent feature domain. IEEE Transa. Affect. Comput. 13(2), 1111\u20131126 (2022)","journal-title":"IEEE Transa. Affect. Comput."},{"key":"8_CR19","doi-asserted-by":"crossref","unstructured":"Shao, Z., Liu, Z., Cai, J., Ma, L.: Deep adaptive attention for joint facial action unit detection and face alignment. In: European Conference on Computer Vision (ECCV 2018), pp. 725\u2013740 (2018)","DOI":"10.1007\/978-3-030-01261-8_43"},{"key":"8_CR20","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems (NIPS 2017), pp. 5998\u20136008 (2017)"},{"key":"8_CR21","doi-asserted-by":"crossref","unstructured":"Vemulapalli, R., Agarwala, A.: A compact embedding for facial expression similarity. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2019), pp. 5676\u20135685 (2019)","DOI":"10.1109\/CVPR.2019.00583"},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Walecki, R., Rudovic, O., Pavlovic, V., Pantic, M.: Copula ordinal regression framework for joint estimation of facial action unit intensity. IEEE Trans. Affect. Comput. (TAC 2019) 10(3), 297\u2013312 (2019)","DOI":"10.1109\/TAFFC.2017.2728534"},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: CBAM: convolutional block attention module. In: European Conference on Computer Vision (ECCV 2018) (2018)","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"8_CR24","doi-asserted-by":"crossref","unstructured":"Zhang, X., et al.: Bp4d-spontaneous: a high-resolution spontaneous 3D dynamic facial expression database. Image Vis. Comput. 32(10), 692 \u2013 706 (2014). http:\/\/www.sciencedirect.com\/science\/article\/pii\/S0262885614001012","DOI":"10.1016\/j.imavis.2014.06.002"},{"key":"8_CR25","doi-asserted-by":"crossref","unstructured":"Zhao, K., Chu, W.S., Zhang, H.: Deep region and multi-label learning for facial action unit detection. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR 2016), pp. 
3391\u20133399 (2016)","DOI":"10.1109\/CVPR.2016.369"}],"container-title":["Communications in Computer and Information Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8148-9_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T18:34:52Z","timestamp":1710268492000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8148-9_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,26]]},"ISBN":["9789819981472","9789819981489"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8148-9_8","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2023,11,26]]},"assertion":[{"value":"26 November 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Changsha","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 November 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 November 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/iconip2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1274","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"650","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"51% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4.14","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.46","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}