{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:32:34Z","timestamp":1750221154827,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":7,"publisher":"ACM","license":[{"start":{"date-parts":[[2018,6,14]],"date-time":"2018-06-14T00:00:00Z","timestamp":1528934400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"European Union","award":["644000"],"award-info":[{"award-number":["644000"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,6,14]]},"DOI":"10.1145\/3204493.3208350","type":"proceedings-article","created":{"date-parts":[[2018,6,7]],"date-time":"2018-06-07T13:57:46Z","timestamp":1528379866000},"page":"1-3","source":"Crossref","is-referenced-by-count":7,"title":["A gaze-contingent intention decoding engine for human augmentation"],"prefix":"10.1145","author":[{"given":"Pavel","family":"Orlov","sequence":"first","affiliation":[{"name":"Imperial College London"}]},{"given":"Ali","family":"Shafti","sequence":"additional","affiliation":[{"name":"Imperial College London"}]},{"given":"Chaiyawan","family":"Auepanwiriyakul","sequence":"additional","affiliation":[{"name":"Imperial College London"}]},{"given":"Noyan","family":"Songur","sequence":"additional","affiliation":[{"name":"Imperial College London"}]},{"given":"A. Aldo","family":"Faisal","sequence":"additional","affiliation":[{"name":"Imperial College London"}]}],"member":"320","published-online":{"date-parts":[[2018,6,14]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.1167\/7.5.6"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1167\/3.1.6"},{"key":"e_1_3_2_2_3_1","unstructured":"Howard A. G. Zhu M. Chen B. Kalenichenko D. Wang W. Weyand T. Andreetto M. and Adam H. (2017). \nMobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.  Howard A. G. Zhu M. Chen B. Kalenichenko D. Wang W. Weyand T. Andreetto M. and Adam H. (2017). MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications."},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"crossref","unstructured":"Huang J. Rathod V. Sun C. Zhu M. Korattikara A. Fathi A. Fischer I. Wojna Z. Song Y. Guadarrama S. and Murphy K. (2017). Speed\/accuracy trade-offs for modern convolutional object detectors. IEEE CVPR.  Huang J. Rathod V. Sun C. Zhu M. Korattikara A. Fathi A. Fischer I. Wojna Z. Song Y. Guadarrama S. and Murphy K. (2017). Speed\/accuracy trade-offs for modern convolutional object detectors. IEEE CVPR.","DOI":"10.1109\/CVPR.2017.351"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1068\/p2935"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Liu W. Anguelov D. Erhan D. Szegedy C. Reed S. Fu C. Y. and Berg A. C. (2016). SSD: Single shot multibox detector. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) 9905 LNCS:21--37.  Liu W. Anguelov D. Erhan D. Szegedy C. Reed S. Fu C. Y. and Berg A. C. (2016). SSD: Single shot multibox detector. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) 9905 LNCS:21--37.","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"e_1_3_2_2_7_1","unstructured":"Yarbus A. L. (1965). Eye movements and vision {Rol dvijenyi glaz v procecce zrenia}. \"Nauka\" Moscow.  Yarbus A. L. (1965). Eye movements and vision {Rol dvijenyi glaz v procecce zrenia}. \n\"Nauka\" Moscow."}],"event":{"name":"ETRA '18: 2018 Symposium on Eye Tracking Research and Applications","sponsor":["SIGGRAPH ACM Special Interest Group on Computer Graphics and Interactive Techniques","SIGCHI ACM Special Interest Group on Computer-Human Interaction"],"location":"Warsaw Poland","acronym":"ETRA '18"},"container-title":["Proceedings of the 2018 ACM Symposium on Eye Tracking Research &amp; Applications"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3204493.3208350","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/dl.acm.org\/ft_gateway.cfm?id=3208350&ftid=1978718&dwn=1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T01:08:31Z","timestamp":1750208911000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3204493.3208350"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,6,14]]},"references-count":7,"alternative-id":["10.1145\/3204493.3208350","10.1145\/3204493"],"URL":"https:\/\/doi.org\/10.1145\/3204493.3208350","relation":{},"subject":[],"published":{"date-parts":[[2018,6,14]]}}}