{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:01:42Z","timestamp":1772553702879,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,7,15]]},"DOI":"10.1109\/icme57554.2024.10687508","type":"proceedings-article","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T17:24:16Z","timestamp":1727717056000},"page":"1-6","source":"Crossref","is-referenced-by-count":36,"title":["CLIPER: A Unified Vision-Language Framework for In-the-Wild Facial Expression Recognition"],"prefix":"10.1109","author":[{"given":"Hanting","family":"Li","sequence":"first","affiliation":[{"name":"University of Science and Technology of China,School of Information Science and Technology,Hefei,China"}]},{"given":"Hongjing","family":"Niu","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,School of Information Science and Technology,Hefei,China"}]},{"given":"Zhaoqing","family":"Zhu","sequence":"additional","affiliation":[{"name":"Alibaba Group,Alibaba DAMO Academy for Discovery,Hangzhou,China"}]},{"given":"Feng","family":"Zhao","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,School of Information Science and Technology,Hefei,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i4.16465"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3122146"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00358"},{"key":"ref5","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Radford"},{"key":"ref6","article-title":"Mvt: mask vision transformer for facial expression recognition in the wild","author":"Li","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475292"},{"key":"ref8","article-title":"Spatio-temporal transformer for dynamic facial expression recognition in the wild","author":"Ma","year":"2022"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547865"},{"key":"ref10","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Jia"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01760"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.23915\/distill.00030"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.277"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2740923"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413620"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/MMUL.2012.26"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i1.25077"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.02025"},{"key":"ref23","article-title":"Fixing weight decay regularization in adam","author":"Loshchilov","year":"2018"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2956143"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3049955"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01965"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00685"},{"key":"ref29","article-title":"Nr-dfernet: Noise-robust network for dynamic facial expression recognition","author":"Li","year":"2022"}],"event":{"name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","location":"Niagara Falls, ON, Canada","start":{"date-parts":[[2024,7,15]]},"end":{"date-parts":[[2024,7,19]]}},"container-title":["2024 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10685847\/10687354\/10687508.pdf?arnumber=10687508","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T06:36:25Z","timestamp":1727764585000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10687508\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,15]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icme57554.2024.10687508","relation":{},"subject":[],"published":{"date-parts":[[2024,7,15]]}}}