{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:17:42Z","timestamp":1750220262704,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":24,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,11,7]],"date-time":"2022-11-07T00:00:00Z","timestamp":1667779200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,11,7]]},"DOI":"10.1145\/3536220.3558007","type":"proceedings-article","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T22:11:40Z","timestamp":1667599900000},"page":"98-104","source":"Crossref","is-referenced-by-count":0,"title":["Predicting User Confidence in Video Recordings with Spatio-Temporal Multimodal Analytics"],"prefix":"10.1145","author":[{"given":"Andrew","family":"Emerson","sequence":"first","affiliation":[{"name":"Educational Testing Service, United States"}]},{"given":"Patrick","family":"Houghton","sequence":"additional","affiliation":[{"name":"Educational Testing Service, United States"}]},{"given":"Ke","family":"Chen","sequence":"additional","affiliation":[{"name":"Educational Testing Service, United States"}]},{"given":"Vinay","family":"Basheerabad","sequence":"additional","affiliation":[{"name":"Educational Testing Service, United States"}]},{"given":"Rutuja","family":"Ubale","sequence":"additional","affiliation":[{"name":"Educational Testing Service, United States"}]},{"given":"Chee Wee","family":"Leong","sequence":"additional","affiliation":[{"name":"Educational Testing Service, United States"}]}],"member":"320","published-online":{"date-parts":[[2022,11,7]]},"reference":[{"key":"#cr-split#-e_1_3_2_1_1_1.1","unstructured":"Artsiom Ablavatski Andrey Vakunov Ivan Grishchenko Karthik Raveendran and Matsvei Zhdanovich. 2020. Real-time Pupil Tracking from Monocular Video for Digital Puppetry. https:\/\/doi.org\/10.48550\/ARXIV.2006.11341 10.48550\/ARXIV.2006.11341"},{"key":"#cr-split#-e_1_3_2_1_1_1.2","unstructured":"Artsiom Ablavatski Andrey Vakunov Ivan Grishchenko Karthik Raveendran and Matsvei Zhdanovich. 2020. Real-time Pupil Tracking from Monocular Video for Digital Puppetry. https:\/\/doi.org\/10.48550\/ARXIV.2006.11341"},{"key":"e_1_3_2_1_2_1","unstructured":"M. Argyle. 1972. Non-verbal communication in human social interaction.Non-Verbal Communication(1972).  M. Argyle. 1972. Non-verbal communication in human social interaction.Non-Verbal Communication(1972)."},{"key":"e_1_3_2_1_3_1","volume-title":"Disfluent Responses to Job Interview Questions and What They Entail. Discourse Processes 53 (02","author":"Brosy Julie","year":"2016","unstructured":"Julie Brosy , Adrian Bangerter , and Eric Mayor . 2016. Disfluent Responses to Job Interview Questions and What They Entail. Discourse Processes 53 (02 2016 ). https:\/\/doi.org\/10.1080\/0163853X.2016.1150769 10.1080\/0163853X.2016.1150769 Julie Brosy, Adrian Bangerter, and Eric Mayor. 2016. Disfluent Responses to Job Interview Questions and What They Entail. Discourse Processes 53 (02 2016). https:\/\/doi.org\/10.1080\/0163853X.2016.1150769"},{"key":"e_1_3_2_1_4_1","volume-title":"Implicit effects of speaker accents and vocally-expressed confidence on decisions to trust.Decision 7 (10","author":"Caballero Jonathan","year":"2020","unstructured":"Jonathan Caballero and Marc Pell . 2020. Implicit effects of speaker accents and vocally-expressed confidence on decisions to trust.Decision 7 (10 2020 ). https:\/\/doi.org\/10.1037\/dec0000140 10.1037\/dec0000140 Jonathan Caballero and Marc Pell. 2020. Implicit effects of speaker accents and vocally-expressed confidence on decisions to trust.Decision 7 (10 2020). https:\/\/doi.org\/10.1037\/dec0000140"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACII.2017.8273646"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"crossref","unstructured":"Timothy Degroot and \u00a0Janaki Gooty. 2009. Can Nonverbal Cues be Used to Make Meaningful Personality Attributions in Employment Interviews. Journal of Business and Psychology(2009) 179\u2013192.  Timothy Degroot and \u00a0Janaki Gooty. 2009. Can Nonverbal Cues be Used to Make Meaningful Personality Attributions in Employment Interviews. Journal of Business and Psychology(2009) 179\u2013192.","DOI":"10.1007\/s10869-009-9098-0"},{"key":"e_1_3_2_1_7_1","volume-title":"Early Prediction of Visitor Engagement in Science Museums with Multimodal Learning Analytics","author":"Emerson Andrew","year":"2020","unstructured":"Andrew Emerson , Nathan Henderson , Jonathan Rowe , Wookhee Min , Seung Lee , James Minogue , and James Lester . 2020. Early Prediction of Visitor Engagement in Science Museums with Multimodal Learning Analytics . Association for Computing Machinery , New York, NY, USA , 107\u2013116. https:\/\/doi.org\/10.1145\/3382507.3418890 10.1145\/3382507.3418890 Andrew Emerson, Nathan Henderson, Jonathan Rowe, Wookhee Min, Seung Lee, James Minogue, and James Lester. 2020. Early Prediction of Visitor Engagement in Science Museums with Multimodal Learning Analytics. Association for Computing Machinery, New York, NY, USA, 107\u2013116. https:\/\/doi.org\/10.1145\/3382507.3418890"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.pneurobio.2020.101948"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.214"},{"key":"e_1_3_2_1_10_1","volume-title":"Attention Mesh: High-fidelity Face Mesh Prediction in Real-time. https:\/\/doi.org\/10.48550\/ARXIV.2006.10962","author":"Grishchenko Ivan","year":"2020","unstructured":"Ivan Grishchenko , Artsiom Ablavatski , Yury Kartynnik , Karthik Raveendran , and Matthias Grundmann . 2020 . Attention Mesh: High-fidelity Face Mesh Prediction in Real-time. https:\/\/doi.org\/10.48550\/ARXIV.2006.10962 10.48550\/ARXIV.2006.10962 Ivan Grishchenko, Artsiom Ablavatski, Yury Kartynnik, Karthik Raveendran, and Matthias Grundmann. 2020. Attention Mesh: High-fidelity Face Mesh Prediction in Real-time. https:\/\/doi.org\/10.48550\/ARXIV.2006.10962"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2751469"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3079628.3079669"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2019.2930058"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1177\/1747021819865833"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cortex.2015.02.002"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3462244.3481305"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351529.3360653"},{"key":"e_1_3_2_1_18_1","unstructured":"Chee\u00a0Wee Leong Katrina Roohr Vikram Ramanarayanan Michelle\u00a0P Martin-Raugh Harrison Kell Rutuja Ubale Yao Qian Zydrune Mladineo and Laura McCulla. 2019. To trust or not to trust? A study of human bias in automated video interview assessments. arXiv preprint arXiv:1911.13248(2019).  Chee\u00a0Wee Leong Katrina Roohr Vikram Ramanarayanan Michelle\u00a0P Martin-Raugh Harrison Kell Rutuja Ubale Yao Qian Zydrune Mladineo and Laura McCulla. 2019. To trust or not to trust? A study of human bias in automated video interview assessments. arXiv preprint arXiv:1911.13248(2019)."},{"key":"e_1_3_2_1_19_1","volume-title":"Effect of Modality on Human and Machine Scoring of Presentation Videos","author":"Lepp Haley","year":"2020","unstructured":"Haley Lepp , Chee\u00a0Wee Leong , Katrina Roohr , Michelle Martin-Raugh , and Vikram Ramanarayanan . 2020. Effect of Modality on Human and Machine Scoring of Presentation Videos . Association for Computing Machinery , New York, NY, USA , 630\u2013634. https:\/\/doi.org\/10.1145\/3382507.3418880 10.1145\/3382507.3418880 Haley Lepp, Chee\u00a0Wee Leong, Katrina Roohr, Michelle Martin-Raugh, and Vikram Ramanarayanan. 2020. Effect of Modality on Human and Machine Scoring of Presentation Videos. Association for Computing Machinery, New York, NY, USA, 630\u2013634. https:\/\/doi.org\/10.1145\/3382507.3418880"},{"key":"e_1_3_2_1_20_1","volume-title":"The Look of (Un)confidence: Visual Markers for Inferring Speaker Confidence in Speech. Frontiers in Communication 4","author":"Mori Yondu","year":"2019","unstructured":"Yondu Mori and Marc\u00a0 D. Pell . 2019. The Look of (Un)confidence: Visual Markers for Inferring Speaker Confidence in Speech. Frontiers in Communication 4 ( 2019 ). https:\/\/doi.org\/10.3389\/fcomm.2019.00063 10.3389\/fcomm.2019.00063 Yondu Mori and Marc\u00a0D. Pell. 2019. The Look of (Un)confidence: Visual Markers for Inferring Speaker Confidence in Speech. Frontiers in Communication 4 (2019). https:\/\/doi.org\/10.3389\/fcomm.2019.00063"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3242969.3243027"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/TLT.2022.3171601"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2015.2513401"}],"event":{"name":"ICMI '22: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction"],"location":"Bengaluru India","acronym":"ICMI '22"},"container-title":["INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3536220.3558007","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3536220.3558007","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:30:54Z","timestamp":1750188654000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3536220.3558007"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,11,7]]},"references-count":24,"alternative-id":["10.1145\/3536220.3558007","10.1145\/3536220"],"URL":"https:\/\/doi.org\/10.1145\/3536220.3558007","relation":{},"subject":[],"published":{"date-parts":[[2022,11,7]]}}}