{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T14:39:57Z","timestamp":1773412797581,"version":"3.50.1"},"publisher-location":"New York, New York, USA","reference-count":15,"publisher":"ACM Press","license":[{"start":{"date-parts":[[2016,1,1]],"date-time":"2016-01-01T00:00:00Z","timestamp":1451606400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2016]]},"DOI":"10.1145\/2883851.2883902","type":"proceedings-article","created":{"date-parts":[[2016,4,22]],"date-time":"2016-04-22T13:54:08Z","timestamp":1461333248000},"page":"417-421","source":"Crossref","is-referenced-by-count":24,"title":["A gaze-based learning analytics model"],"prefix":"10.1145","author":[{"given":"Kshitij","family":"Sharma","sequence":"first","affiliation":[{"name":"EPFL, Lausanne, Switzerland"}]},{"given":"Hamed S.","family":"Alavi","sequence":"additional","affiliation":[{"name":"EPFL, Lausanne, Switzerland"}]},{"given":"Patrick","family":"Jermann","sequence":"additional","affiliation":[{"name":"EPFL, Lausanne, Switzerland"}]},{"given":"Pierre","family":"Dillenbourg","sequence":"additional","affiliation":[{"name":"EPFL, Lausanne, Switzerland"}]}],"member":"320","reference":[{"key":"key-10.1145\/2883851.2883902-1","doi-asserted-by":"crossref","unstructured":"P. Allopenna, J. Magnuson, and M. Tanenhaus. Tracking the time course of spoken word recognition using eye movements: Evidence for continuous mapping models* 1,* 2,* 3,* 4,* 5.Journal of memory and language, 38(4), 1998.","DOI":"10.1006\/jmla.1997.2558"},{"key":"key-10.1145\/2883851.2883902-2","doi-asserted-by":"crossref","unstructured":"M. A. Chatti, A. L. Dyckhoff, U. Schroeder, and H. Th&#252;s. A reference model for learning analytics.International Journal of Technology Enhanced Learning, 4(5-6):318--331, 2012.","DOI":"10.1504\/IJTEL.2012.051815"},{"key":"key-10.1145\/2883851.2883902-3","doi-asserted-by":"crossref","unstructured":"D. Clow. The learning analytics cycle: closing the loop effectively. InProceedings of the 2nd international conference on learning analytics and knowledge, pages 134--138. ACM, 2012.","DOI":"10.1145\/2330601.2330636"},{"key":"key-10.1145\/2883851.2883902-4","doi-asserted-by":"crossref","unstructured":"R. D. D. C. Richardson and N. Kirkham. The art of conversation is coordination.Psychological Science, 18(5):407--413, 2007.","DOI":"10.1111\/j.1467-9280.2007.01914.x"},{"key":"key-10.1145\/2883851.2883902-5","unstructured":"S. D'Mello, A. Olney, C. Williams, and P. Hays. Gaze tutor: A gaze-reactive intelligent tutoring system.International Journal of human-computer studies, 70(5):377--398, 2012."},{"key":"key-10.1145\/2883851.2883902-6","doi-asserted-by":"crossref","unstructured":"D. Gergle and A. T. Clark. See what i'm saying? using dyadic mobile eye tracking to study collaborative reference. InIn Proceedings of the ACM 2011 conference on Computer supported cooperative work (pp. 435--444). ACM., 2011.","DOI":"10.1145\/1958824.1958892"},{"key":"key-10.1145\/2883851.2883902-7","doi-asserted-by":"crossref","unstructured":"Z. Griffin and K. Bock. What the eyes say about speaking.Psychological science, 11(4), 2000.","DOI":"10.1111\/1467-9280.00255"},{"key":"key-10.1145\/2883851.2883902-8","doi-asserted-by":"crossref","unstructured":"N. Jaques, C. Conati, J. M. Harley, and R. Azevedo. Predicting affect from gaze data during interaction with an intelligent tutoring system. InIntelligent Tutoring Systems, pages 29--38. Springer, 2014.","DOI":"10.1007\/978-3-319-07221-0_4"},{"key":"key-10.1145\/2883851.2883902-9","unstructured":"P. Jermann. Computer support for interaction regulation in collaborative problem-solving.Unpublished Ph. D. thesis, University of Geneva, Switzerland, 2004."},{"key":"key-10.1145\/2883851.2883902-10","doi-asserted-by":"crossref","unstructured":"P. Jermann and M.-A. Nussli. Effects of sharing text selections on gaze cross-recurrence and interaction quality in a pair programming task. InIn Proceedings of Computer Supported Collaborative Work 2012, 2012.","DOI":"10.1145\/2145204.2145371"},{"key":"key-10.1145\/2883851.2883902-11","unstructured":"A. S. Meyer, A. M. Sleiderink, and W. J. Levelt. Viewing and naming objects: Eye movements during noun phrase production.Cognition, 66(2):B25--B33, 1998."},{"key":"key-10.1145\/2883851.2883902-12","doi-asserted-by":"crossref","unstructured":"A. Oh, H. Fox, M. Van Kleek, A. Adler, K. Gajos, L.-P. Morency, and T. Darrell. Evaluating look-to-talk: a gaze-aware interface in a collaborative environment. InCHI'02 Extended Abstracts on Human Factors in Computing Systems, pages 650--651. ACM, 2002.","DOI":"10.1145\/506443.506528"},{"key":"key-10.1145\/2883851.2883902-13","unstructured":"K. Sharma.Gaze analysis methods for learning analytics. PhD thesis, Ecole Polytechnique Federale de Lausanne, 2015."},{"key":"key-10.1145\/2883851.2883902-14","doi-asserted-by":"crossref","unstructured":"K.-H. Tan, I. Robinson, R. Samadani, B. Lee, D. Gelb, A. Vorbau, B. Culbertson, and J. Apostolopoulos. Connectboard: A remote collaboration system that supports gaze-aware interaction and sharing. InMultimedia Signal Processing, 2009. MMSP'09. IEEE International Workshop on, pages 1--6. IEEE, 2009.","DOI":"10.1109\/MMSP.2009.5293268"},{"key":"key-10.1145\/2883851.2883902-15","doi-asserted-by":"crossref","unstructured":"H. Wang, M. Chignell, and M. Ishizuka. Empathic tutoring software agents using real-time eye tracking. InProceedings of the 2006 symposium on Eye tracking research &#38; applications, pages 73--78. ACM, 2006.","DOI":"10.1145\/1117309.1117346"}],"event":{"name":"the Sixth International Conference","location":"Edinburgh, United Kingdom","acronym":"LAK '16","number":"6","start":{"date-parts":[[2016,4,25]]},"end":{"date-parts":[[2016,4,29]]}},"container-title":["Proceedings of the Sixth International Conference on Learning Analytics &amp; Knowledge - LAK '16"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/2883851.2883902","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/dl.acm.org\/ft_gateway.cfm?id=2883902&amp;ftid=1708840&amp;dwn=1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:54:07Z","timestamp":1750222447000},"score":1,"resource":{"primary":{"URL":"http:\/\/dl.acm.org\/citation.cfm?doid=2883851.2883902"}},"subtitle":["in-video visual feedback to improve learner's attention in MOOCs"],"proceedings-subject":"Learning Analytics & Knowledge","short-title":[],"issued":{"date-parts":[[2016]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1145\/2883851.2883902","relation":{},"subject":[],"published":{"date-parts":[[2016]]}}}