{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T15:39:51Z","timestamp":1759333191455},"reference-count":18,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"11","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Inf. &amp; Syst."],"published-print":{"date-parts":[[2020,11,1]]},"DOI":"10.1587\/transinf.2020edl8065","type":"journal-article","created":{"date-parts":[[2020,10,31]],"date-time":"2020-10-31T22:13:21Z","timestamp":1604182401000},"page":"2403-2406","source":"Crossref","is-referenced-by-count":4,"title":["Unconstrained Facial Expression Recognition Based on Feature Enhanced CNN and Cross-Layer LSTM"],"prefix":"10.1587","volume":"E103.D","author":[{"given":"Ying","family":"TONG","sequence":"first","affiliation":[{"name":"Dept. of Information and Communication Engineering, Nanjing Institute of Technology"}]},{"given":"Rui","family":"CHEN","sequence":"additional","affiliation":[{"name":"Dept. of Information and Communication Engineering, Nanjing Institute of Technology"}]},{"given":"Ruiyu","family":"LIANG","sequence":"additional","affiliation":[{"name":"Dept. of Information and Communication Engineering, Nanjing Institute of Technology"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"publisher","unstructured":"[1] M. Pantic and L.J.M. Rothkrantz, \u201cAutomatic analysis of facial expressions: The state of the art,\u201d IEEE Trans. Pattern Anal. Mach. Intell., vol.22, no.12, pp.1424-1445, 2000. 10.1109\/34.895976","DOI":"10.1109\/34.895976"},{"key":"2","doi-asserted-by":"publisher","unstructured":"[2] Z. Zeng, M. Pantic, G.I. Roisman, and T.S. Huang, \u201cA survey of affect recognition methods: Audio, visual, and spontaneous expressions,\u201d IEEE Trans. Pattern Anal. Mach. Intell., vol.31, no.1, pp.39-58, 2009. 10.1109\/tpami.2008.52","DOI":"10.1109\/TPAMI.2008.52"},{"key":"3","unstructured":"[3] S. Li and W. Deng, \u201cDeep facial expression recognition: A survey,\u201d IEEE Transactions on Affective Computing, arXiv: 1804.08348v2 [cs.CV] 22 Oct. 2018."},{"key":"4","doi-asserted-by":"crossref","unstructured":"[4] M. Baccouche, F. Mamalet, C. Wolf, C. Garcia, and A. Baskurt, \u201cSpatio-Temporal Convolutional Sparse Auto-Encoder for Sequence Classification,\u201d Procedings of the British Machine Vision Conference 2012, pp.124.1-124.12, 2012. 10.5244\/c.26.124","DOI":"10.5244\/C.26.124"},{"key":"5","doi-asserted-by":"crossref","unstructured":"[5] A. Yao, J. Shao, N. Ma, and Y. Chen, \u201cCapturing AU-Aware Facial Features and Their Latent Relations for Emotion Recognition in the Wild.\u201d Proc. 2015 ACM on International Conference on Multimodal Interaction-ICMI &apos;15, pp.451-458, 2015. 10.1145\/2818346.2830585","DOI":"10.1145\/2818346.2830585"},{"key":"6","doi-asserted-by":"crossref","unstructured":"[6] T. Connie, M. Al-Shabi, W.P. Cheah, and M. Goh, \u201cFacial Expression Recognition Using a Hybrid CNN-SIFT Aggregator,\u201d Multi-disciplinary Trends in Artificial Intelligence, Lecture Notes in Computer Science, vol.10607, pp.139-149, Springer International Publishing, Cham, 2017. 10.1007\/978-3-319-69456-6_12","DOI":"10.1007\/978-3-319-69456-6_12"},{"key":"7","doi-asserted-by":"crossref","unstructured":"[7] J. Jeon, J.-C. Park, Y. Jo, C. Nam, K.-H. Bae, Y. Hwang, and D.-S. Kim, \u201cA Real-time Facial Expression Recognizer using Deep Neural Network.\u201d Proc. 10th International Conference on Ubiquitous Information Management and Communication-IMCOM &apos;16, pp.1-4, 2016. 10.1145\/2857546.2857642","DOI":"10.1145\/2857546.2857642"},{"key":"8","doi-asserted-by":"crossref","unstructured":"[8] X. Zhao, X. Liang, L. Liu, T. Li, Y. Han, N. Vasconcelos, and S. Yan, \u201cPeak-piloted Deep Network for Facial Expression Recognition.\u201d Computer Vision-ECCV 2016, Lecture Notes in Computer Science, vol.9906, pp.425-442, Springer International Publishing, Cham, 2016. 10.1007\/978-3-319-46475-6_27","DOI":"10.1007\/978-3-319-46475-6_27"},{"key":"9","doi-asserted-by":"publisher","unstructured":"[9] Z. Yu, Q. Liu, and G. Liu, \u201cDeeper Cascaded Peak-piloted Network for Weak Expression Recognition.\u201d The Visual Computer, vol.34, no.12, pp.1691-1699, 2018. 10.1007\/s00371-017-1443-0","DOI":"10.1007\/s00371-017-1443-0"},{"key":"10","doi-asserted-by":"crossref","unstructured":"[10] H. Jung, S. Lee, J. Yim, S. Park, and J. Kim, \u201cJoint Fine-tuning in Deep Neural Networks for Facial Expression Recognition.\u201d 2015 IEEE International Conference on Computer Vision (ICCV), pp.2983-2991, 2015. 10.1109\/iccv.2015.341","DOI":"10.1109\/ICCV.2015.341"},{"key":"11","doi-asserted-by":"publisher","unstructured":"[11] D.K. Jain, Z. Zhang, and K. Huang, \u201cMulti angle optimal pattern-based deep learning for automatic facial expression recognition,\u201d Pattern. Recogn. Lett., 2017. 10.1016\/j.patrec.2017.06.025","DOI":"10.1016\/j.patrec.2017.06.025"},{"key":"12","doi-asserted-by":"crossref","unstructured":"[12] A. Mollahosseini, D. Chan, and M.H. Mahoor, \u201cGoing deeper in facial expression recognition using deep neural networks,\u201d 2016 IEEE Winter Conference on Applications of Computer Vision (WACV), pp.1-10, 2016. 10.1109\/wacv.2016.7477450","DOI":"10.1109\/WACV.2016.7477450"},{"key":"13","doi-asserted-by":"crossref","unstructured":"[13] O.M. Parkhi, A. Vedaldi, and A. Zisserman, \u201cDeep face recognition,\u201d Proc. British Machine Vision Conference 2015, pp.41.1-41.12, 2015. 10.5244\/c.29.41","DOI":"10.5244\/C.29.41"},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] P. Lucey, J.F. Cohn, T. Kanade, J. Saragih, Z. Ambadar, and I. Matthews, \u201cThe Extended Cohn-Kanade Dataset (CK+): A complete dataset for action unit and emotion-specified expression,\u201d 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops, pp.94-101, 2010. 10.1109\/cvprw.2010.5543262","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"15","doi-asserted-by":"publisher","unstructured":"[15] A. Dhall, R. Goecke, S. Lucey, and T. Gedeon, \u201cCollecting large, richly annotated facial-expression databases from movies,\u201d IEEE Multimedia Mag., vol.19, no.3, pp.34-41, 2012. 10.1109\/mmul.2012.26","DOI":"10.1109\/MMUL.2012.26"},{"key":"16","doi-asserted-by":"crossref","unstructured":"[16] A. Dhall, R. Goecke, S. Lucey, and T. Gedeon, \u201cStatic facial expression analysis in tough conditions: Data, evaluation protocol and benchmark,\u201d 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops), pp.2106-2112, 2011. 10.1109\/iccvw.2011.6130508","DOI":"10.1109\/ICCVW.2011.6130508"},{"key":"17","doi-asserted-by":"crossref","unstructured":"[17] M. Liu, S. Li, S. Shan, R. Wang, and X. Chen, \u201cDeeply learning deformable facial action parts model for dynamic expression analysis,\u201d Computer Vision-ACCV 2014, Lecture Notes in Computer Science, vol.9006, pp.143-157, Springer International Publishing, Cham, 2015. 10.1007\/978-3-319-16817-3_10","DOI":"10.1007\/978-3-319-16817-3_10"},{"key":"18","doi-asserted-by":"publisher","unstructured":"[18] Z. Yu, G. Liu, Q. Liu, and J. Deng, \u201cSpatio-temporal convolutional features with nested LSTM for facial expression recognition,\u201d Neurocomputing, vol.317, pp.50-57, 2018. 10.1016\/j.neucom.2018.07.028","DOI":"10.1016\/j.neucom.2018.07.028"}],"container-title":["IEICE Transactions on Information and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E103.D\/11\/E103.D_2020EDL8065\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,11,7]],"date-time":"2020-11-07T03:25:59Z","timestamp":1604719559000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E103.D\/11\/E103.D_2020EDL8065\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,1]]},"references-count":18,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2020]]}},"URL":"https:\/\/doi.org\/10.1587\/transinf.2020edl8065","relation":{},"ISSN":["0916-8532","1745-1361"],"issn-type":[{"value":"0916-8532","type":"print"},{"value":"1745-1361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,11,1]]}}}