{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T18:39:58Z","timestamp":1761676798724,"version":"3.40.3"},"publisher-location":"Cham","reference-count":20,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030377335"},{"type":"electronic","value":"9783030377342"}],"license":[{"start":{"date-parts":[[2019,12,24]],"date-time":"2019-12-24T00:00:00Z","timestamp":1577145600000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-37734-2_9","type":"book-chapter","created":{"date-parts":[[2019,12,26]],"date-time":"2019-12-26T19:03:00Z","timestamp":1577386980000},"page":"100-111","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Face Tells Detailed Expression: Generating Comprehensive Facial Expression Sentence Through Facial Action Units"],"prefix":"10.1007","author":[{"given":"Joanna","family":"Hong","sequence":"first","affiliation":[]},{"given":"Hong Joo","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Yelin","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Yong Man","family":"Ro","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,12,24]]},"reference":[{"key":"9_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"382","DOI":"10.1007\/978-3-319-46454-1_24","volume-title":"Computer Vision \u2013 ECCV 2016","author":"P Anderson","year":"2016","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: SPICE: semantic propositional image caption evaluation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 382\u2013398. Springer, Cham (2016). \nhttps:\/\/doi.org\/10.1007\/978-3-319-46454-1_24"},{"key":"9_CR2","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372 (2005)"},{"issue":"45\u201360","key":"9_CR3","first-page":"16","volume":"98","author":"P Ekman","year":"1999","unstructured":"Ekman, P.: Basic emotions. Handb. Cogn. Emot. 98(45\u201360), 16 (1999)","journal-title":"Handb. Cogn. Emot."},{"key":"9_CR4","volume-title":"What the Face Reveals: Basic and Applied Studies of Spontaneous Expression Using the Facial Action Coding System (FACS)","author":"R Ekman","year":"1997","unstructured":"Ekman, R.: What the Face Reveals: Basic and Applied Studies of Spontaneous Expression Using the Facial Action Coding System (FACS). Oxford University Press, USA (1997)"},{"issue":"1","key":"9_CR5","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1016\/S0031-3203(02)00052-3","volume":"36","author":"B Fasel","year":"2003","unstructured":"Fasel, B., Luettin, J.: Automatic facial expression analysis: a survey. Pattern Recogn. 36(1), 259\u2013275 (2003)","journal-title":"Pattern Recogn."},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Huber, B., McDuff, D., Brockett, C., Galley, M., Dolan, B.: Emotional dialogue generation using image-grounded language models. 
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, p. 277. ACM (2018)","DOI":"10.1145\/3173574.3173851"},{"issue":"2","key":"9_CR7","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1007\/s12193-015-0195-2","volume":"10","author":"SE Kahou","year":"2016","unstructured":"Kahou, S.E., et al.: EmoNets: multimodal deep learning approaches for emotion recognition in video. J. Multimodal User Interfaces 10(2), 99\u2013111 (2016)","journal-title":"J. Multimodal User Interfaces"},{"issue":"2","key":"9_CR8","doi-asserted-by":"publisher","first-page":"401","DOI":"10.3390\/s18020401","volume":"18","author":"B Ko","year":"2018","unstructured":"Ko, B.: A brief review of facial emotion recognition based on visual information. Sensors 18(2), 401 (2018)","journal-title":"Sensors"},{"key":"9_CR9","doi-asserted-by":"crossref","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended Cohn-Kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops, pp. 94\u2013101. IEEE (2010)","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"Mavadati, M., Sanger, P., Mahoor, M.H.: Extended DISFA dataset: investigating posed and spontaneous facial expressions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 1\u20138 (2016)","DOI":"10.1109\/CVPRW.2016.182"},{"key":"9_CR11","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"226","DOI":"10.1007\/978-3-030-10925-7_14","volume-title":"Machine Learning and Knowledge Discovery in Databases","author":"O Mohamad Nezami","year":"2019","unstructured":"Mohamad Nezami, O., Dras, M., Anderson, P., Hamey, L.: Face-cap: image captioning using facial expression analysis. In: Berlingerio, M., Bonchi, F., G\u00e4rtner, T., Hurley, N., Ifrim, G. (eds.) ECML PKDD 2018. LNCS (LNAI), vol. 11051, pp. 226\u2013240. Springer, Cham (2019). \nhttps:\/\/doi.org\/10.1007\/978-3-030-10925-7_14"},{"key":"9_CR12","unstructured":"Pantic, M., Valstar, M., Rademaker, R., Maat, L.: Web-based database for facial expression analysis. In: 2005 IEEE International Conference on Multimedia and Expo, pp. 5-pp. IEEE (2005)"},{"key":"9_CR13","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, pp. 311\u2013318. Association for Computational Linguistics (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"9_CR14","unstructured":"Tang, Y.: Deep learning using linear support vector machines. arXiv preprint. \narXiv:1306.0239\n\n (2013)"},{"key":"9_CR15","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"9_CR16","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
3156\u20133164 (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"issue":"5","key":"9_CR17","doi-asserted-by":"publisher","first-page":"e0177239","DOI":"10.1371\/journal.pone.0177239","volume":"12","author":"M Wegrzyn","year":"2017","unstructured":"Wegrzyn, M., Vogt, M., Kireclioglu, B., Schneider, J., Kissler, J.: Mapping the emotional face. How individual face parts contribute to successful emotion recognition. PloS One 12(5), e0177239 (2017)","journal-title":"PloS One"},{"key":"9_CR18","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057 (2015)"},{"key":"9_CR19","doi-asserted-by":"crossref","unstructured":"Yu, Z., Zhang, C.: Image based static facial expression recognition with multiple deep network learning. In: Proceedings of the 2015 ACM on International Conference on Multimodal Interaction, pp. 435\u2013442. ACM (2015)","DOI":"10.1145\/2818346.2830595"},{"key":"9_CR20","doi-asserted-by":"publisher","first-page":"643","DOI":"10.1016\/j.neucom.2017.08.043","volume":"273","author":"N Zeng","year":"2018","unstructured":"Zeng, N., Zhang, H., Song, B., Liu, W., Li, Y., Dobaie, A.M.: Facial expression recognition via learning deep sparse autoencoders. Neurocomputing 273, 643\u2013649 (2018)","journal-title":"Neurocomputing"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-37734-2_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,2,6]],"date-time":"2020-02-06T13:09:13Z","timestamp":1580994553000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-37734-2_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,12,24]]},"ISBN":["9783030377335","9783030377342"],"references-count":20,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-37734-2_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2019,12,24]]},"assertion":[{"value":"24 December 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 January 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 January 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"26","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.mmm2020.kr\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"171","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"40","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"23% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Of the 171 submissions, 46 were accepted as poster papers; of the 49 special session paper submissions, 28 were accepted for oral presentation and 8 for poster presentation; 9 demo papers and 10 VBS papers were also accepted.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}