{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T21:17:18Z","timestamp":1776115038773,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":53,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,27]],"date-time":"2024-10-27T00:00:00Z","timestamp":1729987200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"The Institute of Museum and Library Services (IMLS), the Laura Bush 21st Century Librarian Program","award":["#RE-254891-OLS-23"],"award-info":[{"award-number":["#RE-254891-OLS-23"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,27]]},"DOI":"10.1145\/3663548.3675660","type":"proceedings-article","created":{"date-parts":[[2024,10,20]],"date-time":"2024-10-20T18:37:25Z","timestamp":1729449445000},"page":"1-31","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":16,"title":["MAIDR Meets AI: Exploring Multimodal LLM-Based Data Visualization Interpretation by and with Blind and Low-Vision Users"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4064-6012","authenticated-orcid":false,"given":"JooYoung","family":"Seo","sequence":"first","affiliation":[{"name":"School of Information Sciences, University of Illinois Urbana-Champaign, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6469-0360","authenticated-orcid":false,"given":"Sanchita S.","family":"Kamath","sequence":"additional","affiliation":[{"name":"School of Information Sciences, University of Illinois Urbana-Champaign, United States"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-9334-8660","authenticated-orcid":false,"given":"Aziz","family":"Zeidieh","sequence":"additional","affiliation":[{"name":"Informatics, University of Illinois Urbana-Champaign, United States"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-7810-2305","authenticated-orcid":false,"given":"Saairam","family":"Venkatesh","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Illinois Urbana-Champaign, United States"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4020-534X","authenticated-orcid":false,"given":"Sean","family":"McCurry","sequence":"additional","affiliation":[{"name":"TransPerfect, United States"}]}],"member":"320","published-online":{"date-parts":[[2024,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"[n. d.]. IDEO Design Thinking. https:\/\/designthinking.ideo.com."},{"key":"e_1_3_2_1_2_1","unstructured":"[n. d.]. Multimodal Generative AI Experiments: GPT-4 Vs Gemini Pro Vision Describing TV News Images & Ukraine War Maps \u2013 The GDELT Project."},{"key":"e_1_3_2_1_3_1","volume-title":"Olli - Screen Reader Accessibility for Data Visualization","unstructured":"2022. Olli - Screen Reader Accessibility for Data Visualization. MIT Visualization Group."},{"key":"e_1_3_2_1_4_1","unstructured":"Accessibility at Penn State. 2014. Charts & Accessibility. https:\/\/accessibility.psu.edu\/images\/charts\/."},{"key":"e_1_3_2_1_5_1","unstructured":"Aira. 2024. We\u2019re Aira a Visual Interpreting Service.https:\/\/airaio.kinsta.cloud\/."},{"key":"e_1_3_2_1_6_1","unstructured":"Alwar Pillai. [n. d.]. Accessible Usability Scale (AUS). https:\/\/makeitfable.com\/accessible-usability-scale\/."},{"key":"e_1_3_2_1_7_1","unstructured":"Anthropic. 2024. Introducing the next Generation of Claude. https:\/\/www.anthropic.com\/news\/claude-3-family."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","unstructured":"Yejin Bang Samuel Cahyawijaya Nayeon Lee Wenliang Dai Dan Su Bryan Wilie Holy Lovenia Ziwei Ji Tiezheng Yu Willy Chung Quyet\u00a0V. Do Yan Xu and Pascale Fung. 2023. A Multitask Multilingual Multimodal Evaluation of ChatGPT on Reasoning Hallucination and Interactivity. https:\/\/doi.org\/10.48550\/arXiv.2302.04023 arxiv:2302.04023\u00a0[cs]","DOI":"10.48550\/arXiv.2302.04023"},{"key":"e_1_3_2_1_9_1","unstructured":"Be My Eyes. 2023. Announcing \u2018Be My AI \u2019 Soon Available for Hundreds of Thousands of Be My Eyes Users. https:\/\/www.bemyeyes.com\/blog\/announcing-be-my-ai."},{"key":"e_1_3_2_1_10_1","unstructured":"Be My Eyes. 2024. Be My Eyes - See the World Together. https:\/\/www.bemyeyes.com\/."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.5220\/0010994600003176"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3234695.3236348"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/1866029.1866080"},{"key":"e_1_3_2_1_14_1","first-page":"2","article-title":"SUS: a retrospective","volume":"8","author":"Brooke John","year":"2013","unstructured":"John Brooke. 2013. SUS: a retrospective. Journal of Usability Studies 8, 2 (Feb. 2013), 29\u201340.","journal-title":"Journal of Usability Studies"},{"key":"e_1_3_2_1_15_1","unstructured":"CloudSight. 2024. Image Recognition API & General Purpose Computer Vision and Captioning - CloudSight AI. https:\/\/cloudsight.ai\/."},{"key":"e_1_3_2_1_16_1","unstructured":"Gapminder. 2023. Gapminder. https:\/\/gapminder.org\/."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3597638.3614548"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3586182.3616669"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3114846"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376467"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3544548.3581532"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","unstructured":"Haotian Liu Chunyuan Li Yuheng Li and Yong\u00a0Jae Lee. 2023. Improved Baselines with Visual Instruction Tuning. https:\/\/doi.org\/10.48550\/arXiv.2310.03744 arxiv:2310.03744\u00a0[cs]","DOI":"10.48550\/arXiv.2310.03744"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3114770"},{"key":"e_1_3_2_1_24_1","volume-title":"Using the RITE Method to Improve Products","author":"Medlock C.","year":"2007","unstructured":"Michael\u00a0C. Medlock, Dennis Wixon, Mark Terrano, and Ramon\u00a0L. Romero. 2007. Using the RITE Method to Improve Products; a Definition and a Case Study. (Jan. 2007)."},{"key":"e_1_3_2_1_25_1","unstructured":"Microsoft. 2024. Seeing AI - An App for Visually Impaired People That Narrates the World around You. https:\/\/www.microsoft.com\/en-us\/garage\/wall-of-fame\/seeing-ai\/."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/2764916"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1080\/17538157.2021.1982949"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","unstructured":"Chengwei Qin Aston Zhang Zhuosheng Zhang Jiaao Chen Michihiro Yasunaga and Diyi Yang. 2023. Is ChatGPT a General-Purpose Natural Language Processing Task Solver?https:\/\/doi.org\/10.48550\/arXiv.2302.06476 arxiv:2302.06476\u00a0[cs]","DOI":"10.48550\/arXiv.2302.06476"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","unstructured":"Alec Radford Jong\u00a0Wook Kim Tao Xu Greg Brockman Christine McLeavey and Ilya Sutskever. 2022. Robust Speech Recognition via Large-Scale Weak Supervision. https:\/\/doi.org\/10.48550\/arXiv.2212.04356 arXiv:2212.04356 [cs eess].","DOI":"10.48550\/arXiv.2212.04356"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.2312\/eved.20241053"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3613904.3642730"},{"key":"e_1_3_2_1_33_1","unstructured":"Ather Sharif. 2022. VoxLens."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3517428.3544813"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3491102.3517431"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3587281.3587284"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3597638.3614502"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1080\/15710882.2023.2214145"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1016\/0364-0213(88)90023-7"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10648-010-9128-5"},{"key":"e_1_3_2_1_41_1","unstructured":"TapTapSee. [n. d.]. TapTapSee - Blind and Visually Impaired Assistive Technology - Powered by CloudSight.Ai Image Recognition API. https:\/\/taptapseeapp.com\/."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2312.11805"},{"key":"e_1_3_2_1_43_1","unstructured":"The Consumer Financial Protection Bureau. 2024. Welcome to the CFPB Design System - CFPB Design System. https:\/\/cfpb.github.io\/design-system\/."},{"key":"e_1_3_2_1_44_1","unstructured":"The Diagram Center. 2009. Specific Guidelines - Graphs. http:\/\/diagramcenter.org\/specific-guidelines-e.html\/."},{"key":"e_1_3_2_1_45_1","volume-title":"A general inductive approach for analyzing qualitative evaluation data. American journal of evaluation 27, 2","author":"Thomas R","year":"2006","unstructured":"David\u00a0R Thomas. 2006. A general inductive approach for analyzing qualitative evaluation data. American journal of evaluation 27, 2 (2006), 237\u2013246."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/3544548.3581186"},{"key":"e_1_3_2_1_47_1","unstructured":"Abid Virani. 2020. Building the Accessible Usability Scale - A Walkthrough. https:\/\/makeitfable.com\/article\/building-the-accessible-usability-scale\/."},{"key":"e_1_3_2_1_48_1","unstructured":"Web Accessibility Initiative. 2022. Complex Images. https:\/\/www.w3.org\/WAI\/tutorials\/images\/complex\/."},{"key":"e_1_3_2_1_49_1","unstructured":"WebAIM. 2024. WebAIM: The WebAIM Million - The 2024 Report on the Accessibility of the Top 1 000 000 Home Pages. https:\/\/webaim.org\/projects\/million\/#labels."},{"key":"e_1_3_2_1_50_1","unstructured":"Billy West. 1995. The Art and Science of Audio Book Production. https:\/\/www.loc.gov\/nls\/who-we-are\/guidelines-and-specifications\/the-art-and-science-of-audio-book-production\/."},{"key":"e_1_3_2_1_51_1","unstructured":"World Wide Web Consortium. 2023. Understanding Success Criterion 1.1.1 | Understanding WCAG 2.0. https:\/\/www.w3.org\/TR\/UNDERSTANDING-WCAG20\/text-equiv-all.html."},{"key":"e_1_3_2_1_52_1","unstructured":"(x)Ability Design\u00a0Lab. 2024. maidr: Multimodal Access and Interactive Data Representation. https:\/\/github.com\/xability\/maidr"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2014.00088"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3613904.3641996"}],"event":{"name":"ASSETS '24: The 26th International ACM SIGACCESS Conference on Computers and Accessibility","location":"St. John's NL Canada","acronym":"ASSETS '24","sponsor":["SIGACCESS ACM Special Interest Group on Accessible Computing"]},"container-title":["The 26th International ACM SIGACCESS Conference on Computers and Accessibility"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3663548.3675660","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T23:57:17Z","timestamp":1750291037000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3663548.3675660"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,27]]},"references-count":53,"alternative-id":["10.1145\/3663548.3675660","10.1145\/3663548"],"URL":"https:\/\/doi.org\/10.1145\/3663548.3675660","relation":{},"subject":[],"published":{"date-parts":[[2024,10,27]]},"assertion":[{"value":"2024-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}