{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T05:49:00Z","timestamp":1772689740481,"version":"3.50.1"},"reference-count":39,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea (NRF) grant","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100014188","name":"Korea Government [Ministry of Science and ICT (MSIT)]","doi-asserted-by":"publisher","award":["NRF-2020R1A4A1019191"],"award-info":[{"award-number":["NRF-2020R1A4A1019191"]}],"id":[{"id":"10.13039\/501100014188","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003725","name":"Bio and Medical Technology Development Program of the National Research Foundation (NRF) grant","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Korean Government","award":["NRF-2019M3E5D1A02067961"],"award-info":[{"award-number":["NRF-2019M3E5D1A02067961"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3091169","type":"journal-article","created":{"date-parts":[[2021,6,21]],"date-time":"2021-06-21T20:05:51Z","timestamp":1624305951000},"page":"90465-90474","source":"Crossref","is-referenced-by-count":37,"title":["Context-Aware Emotion Recognition Based on Visual Relationship Detection"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6362-5791","authenticated-orcid":false,"given":"Manh-Hung","family":"Hoang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3575-5035","authenticated-orcid":false,"given":"Soo-Hyung","family":"Kim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3024-5060","authenticated-orcid":false,"given":"Hyung-Jeong","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8756-1382","authenticated-orcid":false,"given":"Guee-Sang","family":"Lee","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3064918"},{"key":"ref38","first-page":"1","article-title":"A graph convolutional network for emotion recognition in context","author":"zeng","year":"2020","journal-title":"Proc Cross Strait Radio Sci Wireless Technol Conf (CSRSWTC)"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref31","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv 1409 1556"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2016.2603342"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.544"},{"key":"ref35","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref34","first-page":"3","article-title":"CBAM: Convolutional block attention module","author":"woo","year":"2018","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2017.01.012"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-42051-1_16"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2012.2186121"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2017.23"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2018.2860246"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682283"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2017.137"},{"key":"ref17","article-title":"Multi-task, multi-label and multi-domain learning with residual convolutional networks for emotion recognition","author":"pons","year":"2018","journal-title":"arXiv 1802 06664"},{"key":"ref18","first-page":"14234","article-title":"EmotiCon: Context-aware multimodal emotion recognition using Frege&#x2019;s principle","author":"mittal","year":"2020","journal-title":"Proc IEEE\/CVF Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.121"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00370"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/science.1224313"},{"key":"ref27","article-title":"DIRV: Dense interaction region voting for end-to-end human-object interaction detection","author":"fang","year":"2020","journal-title":"arXiv 2010 01005"},{"key":"ref3","first-page":"2755","article-title":"Context based emotion recognition using EMOTIC dataset","volume":"42","author":"kosti","year":"2020","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01024"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_41"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1037\/a0024572"},{"key":"ref8","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"arXiv 1506 01497"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2019.00034"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MIPRO.2016.7522336"},{"key":"ref9","article-title":"Convolutional neural networks pretrained on large face recognition datasets for emotion classification from video","author":"knyazev","year":"2017","journal-title":"arXiv 1711 04598"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2020.2981446"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6913"},{"key":"ref22","article-title":"DeepSentiBank: Visual sentiment concept classification with deep convolutional neural networks","author":"chen","year":"2014","journal-title":"arXiv 1410 8586"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.2992222"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58610-2_41"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2017.45"},{"key":"ref26","article-title":"Reformulating HOI detection as adaptive set prediction","author":"chen","year":"2021","journal-title":"arXiv 2103 05983"},{"key":"ref25","article-title":"Spatially conditioned graphs for detecting human-object interactions","author":"zhang","year":"2020","journal-title":"arXiv 2012 06060"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09461727.pdf?arnumber=9461727","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,17]],"date-time":"2021-12-17T19:56:41Z","timestamp":1639771001000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9461727\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3091169","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}