{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,10]],"date-time":"2026-01-10T03:07:08Z","timestamp":1768014428997,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":72,"publisher":"ACM","license":[{"start":{"date-parts":[[2019,5,2]],"date-time":"2019-05-02T00:00:00Z","timestamp":1556755200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"National Institute on Disability, Independent Living, and Rehabilitation Research (NIDILRR)","award":["90REGE0008"],"award-info":[{"award-number":["90REGE0008"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2019,5,2]]},"DOI":"10.1145\/3290605.3300566","type":"proceedings-article","created":{"date-parts":[[2019,4,29]],"date-time":"2019-04-29T17:36:58Z","timestamp":1556559418000},"page":"1-12","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":43,"title":["Hands Holding Clues for Object Recognition in Teachable Machines"],"prefix":"10.1145","author":[{"given":"Kyungjun","family":"Lee","sequence":"first","affiliation":[{"name":"University of Maryland, College Park, College Park, MD, USA"}]},{"given":"Hernisa","family":"Kacorri","sequence":"additional","affiliation":[{"name":"University of Maryland, College Park, College Park, MD, USA"}]}],"member":"320","published-online":{"date-parts":[[2019,5,2]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/2982142.2982169"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/2504335.2504360"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/2702123.2702334"},{"key":"e_1_3_2_2_4_1","unstructured":"Envision AI. 2018. Enabling vision for the blind. https:\/\/www. letsenvision.com  Envision AI. 2018. Enabling vision for the blind. https:\/\/www. letsenvision.com"},{"key":"e_1_3_2_2_5_1","unstructured":"Seeing AI. 2017. A free app that narrates the world around you. https: \/\/www.microsoft.com\/en-us\/seeing-ai  Seeing AI. 2017. A free app that narrates the world around you. https: \/\/www.microsoft.com\/en-us\/seeing-ai"},{"key":"e_1_3_2_2_6_1","unstructured":"VocalEyes AI. 2017. Computer Vision for the blind. http:\/\/vocaleyes.ai  VocalEyes AI. 2017. Computer Vision for the blind. http:\/\/vocaleyes.ai"},{"key":"e_1_3_2_2_7_1","unstructured":"Aira. 2017. Your Life Your Schedule Right Now. https:\/\/aira.io  Aira. 2017. Your Life Your Schedule Right Now. https:\/\/aira.io"},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.226"},{"key":"e_1_3_2_2_9_1","unstructured":"BeMyEyes. 2015. Lend you eyes to the blind. http:\/\/www.bemyeyes. org  BeMyEyes. 2015. Lend you eyes to the blind. http:\/\/www.bemyeyes. org"},{"key":"e_1_3_2_2_10_1","unstructured":"BeSpecular. 2016. Let blind people see through your eyes. https: \/\/www.bespecular.com  BeSpecular. 2016. Let blind people see through your eyes. https: \/\/www.bespecular.com"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/1866029.1866080"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/2441776.2441915"},{"key":"e_1_3_2_2_13_1","volume-title":"Understanding handobject manipulation by modeling the contextual relationship between actions, grasp types and object attributes. 
{"key":"e_1_3_2_2_13_1","volume-title":"Understanding hand-object manipulation by modeling the contextual relationship between actions, grasp types and object attributes. arXiv preprint arXiv:1807.08254","author":"Cai Minjie","year":"2018","unstructured":"Minjie Cai, Kris Kitani, and Yoichi Sato. 2018. Understanding hand-object manipulation by modeling the contextual relationship between actions, grasp types and object attributes. arXiv preprint arXiv:1807.08254 (2018)."},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2016.XII.034"},{"key":"e_1_3_2_2_15_1","unstructured":"CamFind. 2013. Search the physical world. http:\/\/camfindapp.com"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2011.2106782"},{"key":"e_1_3_2_2_17_1","unstructured":"Digit-Eyes. 2010. Identify and organize your world. http:\/\/www.digit-eyes.com"},{"key":"e_1_3_2_2_18_1","unstructured":"EyeNote. 2010. Mobile device application to denominate Federal Reserve Notes (U.S. paper currency) as an aid for the blind or visually impaired to increase accessibility. https:\/\/www.eyenote.gov"},{"key":"e_1_3_2_2_19_1","unstructured":"EyeSpy. 2015. The world's best object recognition mobile app. http:\/\/www.eyespy.com"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33718-5_23"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2011.5995444"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3161711"},{"key":"e_1_3_2_2_23_1","volume-title":"Contact points during multidigit grasping of geometric objects. Experimental brain research 217, 1","author":"Gilster Ren\u00e9","year":"2012","unstructured":"Ren\u00e9 Gilster, Constanze Hesse, and Heiner Deubel. 2012. Contact points during multidigit grasping of geometric objects. Experimental brain research 217, 1 (2012), 137--151."},{"key":"e_1_3_2_2_24_1","unstructured":"Talking Goggles. 2013. A camera with speech. http:\/\/www.sparklingapps.com\/goggles"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1523\/JNEUROSCI.2374-08.2009"},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/2984511.2984518"},{"key":"e_1_3_2_2_27_1","volume-title":"VizWiz Grand Challenge: Answering Visual Questions from Blind People. arXiv preprint arXiv:1802.08218","author":"Gurari Danna","year":"2018","unstructured":"Danna Gurari, Qing Li, Abigale J Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jeffrey P Bigham. 2018. VizWiz Grand Challenge: Answering Visual Questions from Blind People. arXiv preprint arXiv:1802.08218 (2018)."},
{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/2470654.2481292"},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISWC.2007.4373785"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3152129"},{"key":"e_1_3_2_2_31_1","volume-title":"Proceedings of the 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel, The 9th International Conference on Language Resources and Evaluation (LREC","author":"Huenerfauth Matt","year":"2014","unstructured":"Matt Huenerfauth and Hernisa Kacorri. 2014. Release of experimental stimuli and questions for evaluating facial expressions in animations of American Sign Language. In Proceedings of the 6th Workshop on the Representation and Processing of Sign Languages: Beyond the Manual Channel, The 9th International Conference on Language Resources and Evaluation (LREC 2014), Reykjavik, Iceland."},{"key":"e_1_3_2_2_32_1","volume-title":"Computational modelling of visual attention. Nature reviews neuroscience 2, 3","author":"Itti Laurent","year":"2001","unstructured":"Laurent Itti and Christof Koch. 2001. Computational modelling of visual attention. Nature reviews neuroscience 2, 3 (2001), 194."},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-013-0886-1"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/2049536.2049573"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3167902.3167904"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3025453.3025899"},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/2982142.2982178"},{"key":"e_1_3_2_2_38_1","volume-title":"Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980","author":"Kingma Diederik P","year":"2014","unstructured":"Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)."},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1080\/00222895.1987.10735407"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2016.09.001"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298625"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"e_1_3_2_2_43_1","volume-title":"Learning transferable features with deep adaptation networks. arXiv preprint arXiv:1502.02791","author":"Long Mingsheng","year":"2015","unstructured":"Mingsheng Long, Yue Cao, Jianmin Wang, and Michael I Jordan. 2015. Learning transferable features with deep adaptation networks. arXiv preprint arXiv:1502.02791 (2015)."},
{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.209"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.5555\/850976.854952"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/2063176.2063200"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3132525.3134802"},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.187"},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/34.598226"},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.222"},{"key":"e_1_3_2_2_51_1","volume-title":"The proprioceptive senses: their roles in signaling body shape, body position and movement, and muscle force. Physiological reviews 92, 4","author":"Proske Uwe","year":"2012","unstructured":"Uwe Proske and Simon C Gandevia. 2012. The proprioceptive senses: their roles in signaling body shape, body position and movement, and muscle force. Physiological reviews 92, 4 (2012), 1651--1697."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-012-9356-9"},{"key":"e_1_3_2_2_53_1","unstructured":"LookTel Recognizer. 2012. Instantly recognize everyday objects. http:\/\/www.looktel.com\/recognizer"},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.5555\/1769590.1769672"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2010.5540074"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939778"},{"key":"e_1_3_2_2_57_1","volume-title":"Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL).","author":"Rosenberg Andrew","year":"2007","unstructured":"Andrew Rosenberg and Julia Hirschberg. 2007. V-measure: A conditional entropy-based external cluster evaluation measure. In Proceedings of the 2007 joint conference on empirical methods in natural language processing and computational natural language learning (EMNLP-CoNLL)."},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_2_59_1","doi-asserted-by":"crossref","unstructured":"Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, Dhruv Batra, et al. 2017. Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization. In ICCV. 618--626.","DOI":"10.1109\/ICCV.2017.74"},
618--626.","DOI":"10.1109\/ICCV.2017.74"},{"key":"e_1_3_2_2_60_1","doi-asserted-by":"publisher","DOI":"10.1093\/brain\/29.4.467"},{"key":"e_1_3_2_2_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/1978942.1979044"},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3060056"},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"e_1_3_2_2_65_1","unstructured":"TapTapSee. 2012. Mobile camera application designed specifcally for the blind and visually impaired iOS users. http:\/\/www.taptapseeapp. com  TapTapSee. 2012. Mobile camera application designed specifcally for the blind and visually impaired iOS users. http:\/\/www.taptapseeapp. com"},{"key":"e_1_3_2_2_66_1","doi-asserted-by":"publisher","DOI":"10.1145\/2384916.2384934"},{"key":"e_1_3_2_2_67_1","unstructured":"Aipoly Vision. 2016. Sight for Blind & Visually Impaired. http: \/\/aipoly.com  Aipoly Vision. 2016. Sight for Blind & Visually Impaired. http: \/\/aipoly.com"},{"key":"e_1_3_2_2_68_1","unstructured":"WayAround. 2018. The smart assistant for people who are blind. https:\/\/www.wayaround.com  WayAround. 2018. The smart assistant for people who are blind. https:\/\/www.wayaround.com"},{"key":"e_1_3_2_2_69_1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1999.832699"},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2006.109"},{"key":"e_1_3_2_2_71_1","doi-asserted-by":"publisher","DOI":"10.1145\/2556288.2557085"},{"key":"e_1_3_2_2_72_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2808769"},{"key":"e_1_3_2_2_73_1","doi-asserted-by":"publisher","DOI":"10.1145\/2513383.2513443"}],"event":{"name":"CHI '19: CHI Conference on Human Factors in Computing Systems","location":"Glasgow Scotland Uk","acronym":"CHI '19","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3290605.3300566","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3290605.3300566","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T23:53:07Z","timestamp":1750204387000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3290605.3300566"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5,2]]},"references-count":72,"alternative-id":["10.1145\/3290605.3300566","10.1145\/3290605"],"URL":"https:\/\/doi.org\/10.1145\/3290605.3300566","relation":{},"subject":[],"published":{"date-parts":[[2019,5,2]]},"assertion":[{"value":"2019-05-02","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}