{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:16:31Z","timestamp":1750220191973,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":29,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,9,23]],"date-time":"2022-09-23T00:00:00Z","timestamp":1663891200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,9,23]]},"DOI":"10.1145\/3573942.3574080","type":"proceedings-article","created":{"date-parts":[[2023,5,16]],"date-time":"2023-05-16T23:45:42Z","timestamp":1684280742000},"page":"689-694","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Patch Attention Network for Video Facial Expression Recognition"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9552-8873","authenticated-orcid":false,"given":"Yingkai","family":"Hua","sequence":"first","affiliation":[{"name":"Zhejiang University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0910-2375","authenticated-orcid":false,"given":"Xinmin","family":"Xu","sequence":"additional","affiliation":[{"name":"Zhejiang University, China"}]}],"member":"320","published-online":{"date-parts":[[2023,5,16]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"crossref","unstructured":"A. Mehrabian. 2017. Communication Without Words. communication theory.","DOI":"10.4324\/9781315080918-15"},{"key":"e_1_3_2_1_2_1","volume-title":"Deep facial expression recognition: A survey","author":"Li Shan","year":"2020","unstructured":"Shan Li and Weihong Deng. 2020. Deep facial expression recognition: A survey. IEEE T. Affect. Comput. (2020)."},{"key":"e_1_3_2_1_3_1","volume-title":"Face recognition by using back propagation artificial neural network and windowing method. Journal of Image and Graphics","author":"Korkmaz Mehmet","year":"2016","unstructured":"Mehmet Korkmaz and Nihat Yilmaz. 2016. Face recognition by using back propagation artificial neural network and windowing method. Journal of Image and Graphics (2016), 15-19."},{"key":"e_1_3_2_1_4_1","volume-title":"Staudemeyer and Eric Rothstein Morris","author":"Ralf","year":"2019","unstructured":"Ralf C. Staudemeyer and Eric Rothstein Morris. 2019. Understanding LSTM\u2013a tutorial into long short-term memory recurrent neural networks. arXiv preprint arXiv:1909.09586 (2019)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"crossref","unstructured":"D. Tran L. Bourdev R. Fergus L. Torresani and M. Paluri. 2014. Learning Spatiotemporal Features with 3D Convolutional Networks (2014).","DOI":"10.1109\/ICCV.2015.510"},{"key":"e_1_3_2_1_6_1","volume-title":"Softmax regression based deep sparse autoencoder network for facial emotion recognition in human-robot interaction. Inform. Sciences","author":"Chen Luefeng","year":"2018","unstructured":"Luefeng Chen, Mengtian Zhou, Wanjuan Su, Min Wu, Jinhua She, and Kaoru Hirota. 2018. Softmax regression based deep sparse autoencoder network for facial emotion recognition in human-robot interaction. Inform. Sciences (2018), 49-61."},{"key":"e_1_3_2_1_7_1","volume-title":"Ali and David MW Powers","author":"Humayra","year":"2014","unstructured":"Humayra B. Ali and David MW Powers. 2014. Fusion based fastica method: Facial expression recognition. Journal of Image and Graphics (2014), 1-7."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICEIEC49280.2020.9152361"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"crossref","unstructured":"Huiyuan Yang Umur Ciftci and Lijun Yin Facial expression recognition by de-expression residue learning. 2018.","DOI":"10.1109\/CVPR.2018.00231"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"crossref","unstructured":"Kai Wang Xiaojiang Peng Jianfei Yang Shijian Lu and Yu Qiao Suppressing uncertainties for large-scale facial expression recognition. 2020.","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"crossref","unstructured":"J. Chen Z. Chen Z. Chi and H. Fu. 2018. Facial Expression Recognition in Video with Multiple Feature Fusion. IEEE T. Affect. Comput. (2018) 1.","DOI":"10.1109\/TAFFC.2016.2593719"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2737821"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/CCIS.2018.8691380"},{"key":"e_1_3_2_1_14_1","volume-title":"Spatio-temporal convolutional features with nested LSTM for facial expression recognition. Neurocomputing","author":"Yu Zhenbo","year":"2018","unstructured":"Zhenbo Yu, Guangcan Liu, Qingshan Liu, and Jiankang Deng. 2018. Spatio-temporal convolutional features with nested LSTM for facial expression recognition. Neurocomputing (2018), 50-57."},{"key":"e_1_3_2_1_15_1","volume-title":"Learning deep facial expression features from image and optical flow sequences using 3D CNN. The Visual Computer","author":"Zhao Jianfeng","year":"2018","unstructured":"Jianfeng Zhao, Xia Mao, and Jian Zhang. 2018. Learning deep facial expression features from image and optical flow sequences using 3D CNN. The Visual Computer (2018), 1461-1475."},{"key":"e_1_3_2_1_16_1","unstructured":"Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun Deep residual learning for image recognition. 2016."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803603"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"Yu Wang XinMin Xu and Yao Zhuang Learning Dynamics for Video Facial Expression Recognition. 2021.","DOI":"10.1145\/3508546.3508581"},{"key":"e_1_3_2_1_19_1","volume-title":"IEEE","author":"Li Yong","year":"2018","unstructured":"Yong Li, Jiabei Zeng, Shiguang Shan, and Xilin Chen Patch-gated CNN for occlusion-aware facial expression recognition. IEEE, 2018."},{"key":"e_1_3_2_1_20_1","volume-title":"Occlusion aware facial expression recognition using CNN with attention mechanism","author":"Li Yong","year":"2018","unstructured":"Yong Li, Jiabei Zeng, Shiguang Shan, and Xilin Chen. 2018. Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE T. Image Process. (2018), 2439-2450."},{"key":"e_1_3_2_1_21_1","volume-title":"Attention mechanism-based CNN for facial expression recognition. Neurocomputing","author":"Li Jing","year":"2020","unstructured":"Jing Li, Kan Jin, Dalin Zhou, Naoyuki Kubota, and Zhaojie Ju. 2020. Attention mechanism-based CNN for facial expression recognition. Neurocomputing (2020), 340-350."},{"key":"e_1_3_2_1_22_1","volume-title":"Deep-emotion: Facial expression recognition using attentional convolutional network. Sensors-Basel","author":"Minaee Shervin","year":"2021","unstructured":"Shervin Minaee, Mehdi Minaei, and Amirali Abdolrashidi. 2021. Deep-emotion: Facial expression recognition using attentional convolutional network. Sensors-Basel (2021), 3046."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2956143"},{"key":"e_1_3_2_1_24_1","first-page":"1499","volume-title":"Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks. IEEE Signal Proc. Let. (2016","author":"Zhang K.","year":"2016","unstructured":"K. Zhang, Z. Zhang, Z. Li, and Y. Qiao. 2016. Joint Face Detection and Alignment Using Multitask Cascaded Convolutional Networks. IEEE Signal Proc. Let. (2016), 1499-1503. DOI 10.1109\/LSP.2016.2603342"},{"key":"e_1_3_2_1_25_1","volume-title":"Openface: A general-purpose face recognition library with mobile applications. CMU School of Computer Science","author":"Amos Brandon","year":"2016","unstructured":"Brandon Amos, Bartosz Ludwiczuk, and Mahadev Satyanarayanan. 2016. Openface: A general-purpose face recognition library with mobile applications. CMU School of Computer Science (2016), 20."},{"key":"e_1_3_2_1_26_1","volume-title":"A complete dataset for action unit and emotion-specified expression","author":"Lucey Patrick","year":"2010","unstructured":"Patrick Lucey, Jeffrey F. Cohn, Takeo Kanade, Jason Saragih, Zara Ambadar, and Iain Matthews The extended cohn-kanade dataset (ck+): A complete dataset for action unit and emotion-specified expression. IEEE, 2010."},{"key":"e_1_3_2_1_27_1","volume-title":"Audio-video, student engagement and group-level affect prediction","author":"Dhall Abhinav","year":"2018","unstructured":"Abhinav Dhall, Amanjot Kaur, Roland Goecke, and Tom Gedeon Emotiw 2018: Audio-video, student engagement and group-level affect prediction., 2018."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"crossref","unstructured":"Heechul Jung Sihaeng Lee Junho Yim Sunjeong Park and Junmo Kim Joint fine-tuning in deep neural networks for facial expression recognition. 2015.","DOI":"10.1109\/ICCV.2015.341"},{"key":"e_1_3_2_1_29_1","unstructured":"Cheng Lu Wenming Zheng Chaolong Li Chuangao Tang Suyuan Liu Simeng Yan and Yuan Zong Multiple spatio-temporal feature learning for video-based emotion recognition in the wild. 2018."}],"event":{"name":"AIPR 2022: 2022 5th International Conference on Artificial Intelligence and Pattern Recognition","acronym":"AIPR 2022","location":"Xiamen China"},"container-title":["Proceedings of the 2022 5th International Conference on Artificial Intelligence and Pattern Recognition"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3573942.3574080","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3573942.3574080","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:32Z","timestamp":1750186952000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3573942.3574080"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,23]]},"references-count":29,"alternative-id":["10.1145\/3573942.3574080","10.1145\/3573942"],"URL":"https:\/\/doi.org\/10.1145\/3573942.3574080","relation":{},"subject":[],"published":{"date-parts":[[2022,9,23]]},"assertion":[{"value":"2023-05-16","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}