{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T06:07:44Z","timestamp":1725602864603},"reference-count":5,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/icmew56448.2022.9859289","type":"proceedings-article","created":{"date-parts":[[2022,8,23]],"date-time":"2022-08-23T19:52:02Z","timestamp":1661284322000},"page":"1-1","source":"Crossref","is-referenced-by-count":0,"title":["Demusa: Demo for Multimodal Sentiment Analysis"],"prefix":"10.1109","author":[{"given":"Soyeon","family":"Hong","sequence":"first","affiliation":[]},{"given":"Jeonghoon","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Donghoon","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Hyunsouk","family":"Cho","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref4","article-title":"Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages","author":"amir","year":"2016","journal-title":"IEEE Intelligent Systems"},{"key":"ref3","article-title":"Improving multimodal fusion with hierarchical mutual information maximization for multimodal sentiment analysis","author":"wei","year":"2021","journal-title":"EMNLP"},{"key":"ref5","article-title":"Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph","author":"amir","year":"2018","journal-title":"ACL"},{"key":"ref2","article-title":"Misa: Modality-invariant and-specific representations for multimodal sentiment analysis","author":"devamanyu","year":"2020","journal-title":"ACM MM"},{"key":"ref1","article-title":"Integrating multimodal information in large pretrained transformers","author":"wasifur","year":"2020","journal-title":"ACL"}],"event":{"name":"2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)","start":{"date-parts":[[2022,7,18]]},"location":"Taipei City, Taiwan","end":{"date-parts":[[2022,7,22]]}},"container-title":["2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9859264\/9859265\/09859289.pdf?arnumber=9859289","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,12]],"date-time":"2022-09-12T19:59:55Z","timestamp":1663012795000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9859289\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":5,"URL":"https:\/\/doi.org\/10.1109\/icmew56448.2022.9859289","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}