{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T16:56:48Z","timestamp":1776099408016,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T00:00:00Z","timestamp":1710720000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,3,18]]},"DOI":"10.1145\/3640543.3645212","type":"proceedings-article","created":{"date-parts":[[2024,4,5]],"date-time":"2024-04-05T18:23:12Z","timestamp":1712341392000},"page":"886-906","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":31,"title":["FigurA11y: AI Assistance for Writing Scientific Alt Text"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4465-6469","authenticated-orcid":false,"given":"Nikhil","family":"Singh","sequence":"first","affiliation":[{"name":"MIT Media Lab, Massachusetts Institute of Technology, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8752-6635","authenticated-orcid":false,"given":"Lucy Lu","family":"Wang","sequence":"additional","affiliation":[{"name":"Information School, University of Washington, United States and Allen Institute for AI, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5460-9047","authenticated-orcid":false,"given":"Jonathan","family":"Bragg","sequence":"additional","affiliation":[{"name":"Allen Institute for AI, United States"}]}],"member":"320","published-online":{"date-parts":[[2024,4,5]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume":"35","author":"Alayrac Jean-Baptiste","year":"2022","unstructured":"Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, 2022. Flamingo: a visual language model for few-shot learning. Advances in Neural Information Processing Systems 35 (2022), 23716\u201323736.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00305"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/1866029.1866080"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3517428.3544796"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2005.844059"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/2910896.2910904"},{"key":"e_1_3_2_1_7_1","volume-title":"Specter: Document-level representation learning using citation-informed transformers. arXiv preprint arXiv:2004.07180","author":"Cohan Arman","year":"2020","unstructured":"Arman Cohan, Sergey Feldman, Iz Beltagy, Doug Downey, and Daniel\u00a0S Weld. 2020. Specter: Document-level representation learning using citation-informed transformers. arXiv preprint arXiv:2004.07180 (2020)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3526113.3545672"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15561-1_2"},{"key":"e_1_3_2_1_10_1","volume-title":"Sparks: Inspiration for science writing using language models. In Designing interactive systems conference. 
1002\u20131019.","author":"Gero Katy\u00a0Ilonka","year":"2022","unstructured":"Katy\u00a0Ilonka Gero, Vivian Liu, and Lydia Chilton. 2022. Sparks: Inspiration for science writing using language models. In Designing interactive systems conference. 1002\u20131019."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313605"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376728"},{"key":"e_1_3_2_1_13_1","volume-title":"News summarization and evaluation in the era of gpt-3. arXiv preprint arXiv:2209.12356","author":"Goyal Tanya","year":"2022","unstructured":"Tanya Goyal, Junyi\u00a0Jessy Li, and Greg Durrett. 2022. News summarization and evaluation in the era of gpt-3. arXiv preprint arXiv:2209.12356 (2022)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01436"},{"key":"e_1_3_2_1_15_1","volume-title":"Proceedings, Part XVII 16","author":"Gurari Danna","year":"2020","unstructured":"Danna Gurari, Yinan Zhao, Meng Zhang, and Nilavra Bhattacharya. 2020. Captioning images taken by people who are blind. In Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XVII 16. Springer, 417\u2013434."},{"key":"e_1_3_2_1_16_1","volume-title":"SciCap: Generating captions for scientific figures. arXiv preprint arXiv:2110.11624","author":"Hsu Ting-Yao","year":"2021","unstructured":"Ting-Yao Hsu, C\u00a0Lee Giles, and Ting-Hao\u2019Kenneth\u2019 Huang. 2021. SciCap: Generating captions for scientific figures. arXiv preprint arXiv:2110.11624 (2021)."},{"key":"e_1_3_2_1_17_1","volume-title":"Do LVLMs Understand Charts? Analyzing and Correcting Factual Errors in Chart Captioning. arXiv preprint arXiv:2312.10160","author":"Huang Kung-Hsiang","year":"2023","unstructured":"Kung-Hsiang Huang, Mingyang Zhou, Hou\u00a0Pong Chan, Yi\u00a0R Fung, Zhenhailong Wang, Lingyu Zhang, Shih-Fu Chang, and Heng Ji. 2023. Do LVLMs Understand Charts? Analyzing and Correcting Factual Errors in Chart Captioning. arXiv preprint arXiv:2312.10160 (2023)."},{"key":"e_1_3_2_1_18_1","volume-title":"Grounded Intuition of GPT-Vision\u2019s Abilities with Scientific Images. arXiv preprint arXiv:2311.02069","author":"Hwang Alyssa","year":"2023","unstructured":"Alyssa Hwang, Andrew Head, and Chris Callison-Burch. 2023. Grounded Intuition of GPT-Vision\u2019s Abilities with Scientific Images. arXiv preprint arXiv:2311.02069 (2023)."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDARW.2019.00018"},{"key":"e_1_3_2_1_20_1","volume-title":"Highly accurate protein structure prediction with AlphaFold. Nature 596, 7873","author":"Jumper John","year":"2021","unstructured":"John Jumper, Richard Evans, Alexander Pritzel, Tim Green, Michael Figurnov, Olaf Ronneberger, Kathryn Tunyasuvunakool, Russ Bates, Augustin \u017d\u00eddek, Anna Potapenko, 2021. Highly accurate protein structure prediction with AlphaFold. Nature 596, 7873 (2021), 583\u2013589."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3290605.3300641"},{"key":"e_1_3_2_1_22_1","volume-title":"Babytalk: Understanding and generating simple image descriptions","author":"Kulkarni Girish","year":"2013","unstructured":"Girish Kulkarni, Visruth Premraj, Vicente Ordonez, Sagnik Dhar, Siming Li, Yejin Choi, Alexander\u00a0C Berg, and Tamara\u00a0L Berg. 2013. Babytalk: Understanding and generating simple image descriptions. 
IEEE transactions on pattern analysis and machine intelligence 35, 12 (2013), 2891\u20132903."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3491102.3502030"},{"key":"e_1_3_2_1_24_1","volume-title":"Soviet physics doklady, Vol.\u00a010","author":"I Levenshtein","unstructured":"Vladimir\u00a0I Levenshtein 1966. Binary codes capable of correcting deletions, insertions, and reversals. In Soviet physics doklady, Vol.\u00a010. Soviet Union, 707\u2013710."},{"key":"e_1_3_2_1_25_1","volume-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597","author":"Li Junnan","year":"2023","unstructured":"Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"DePlot: One-shot visual language reasoning by plot-to-table translation. arXiv preprint arXiv:2212.10505","author":"Liu Fangyu","year":"2022","unstructured":"Fangyu Liu, Julian\u00a0Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, and Yasemin Altun. 2022. DePlot: One-shot visual language reasoning by plot-to-table translation. arXiv preprint arXiv:2212.10505 (2022)."},{"key":"e_1_3_2_1_27_1","volume-title":"Visual instruction tuning. arXiv preprint arXiv:2304.08485","author":"Liu Haotian","year":"2023","unstructured":"Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong\u00a0Jae Lee. 2023. Visual instruction tuning. arXiv preprint arXiv:2304.08485 (2023)."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-04346-8_62"},{"key":"e_1_3_2_1_29_1","volume-title":"Accessible visualization via natural language descriptions: A four-level model of semantic content","author":"Lundgard Alan","year":"2021","unstructured":"Alan Lundgard and Arvind Satyanarayan. 2021. Accessible visualization via natural language descriptions: A four-level model of semantic content. IEEE transactions on visualization and computer graphics 28, 1 (2021), 1073\u20131083."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3441852.3471207"},{"key":"e_1_3_2_1_31_1","volume-title":"UniChart: A Universal Vision-language Pretrained Model for Chart Comprehension and Reasoning. arXiv preprint arXiv:2305.14761","author":"Masry Ahmed","year":"2023","unstructured":"Ahmed Masry, Parsa Kavehzadeh, Xuan\u00a0Long Do, Enamul Hoque, and Shafiq Joty. 2023. UniChart: A Universal Vision-language Pretrained Model for Chart Comprehension and Reasoning. arXiv preprint arXiv:2305.14761 (2023)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3544548.3581225"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/2764916"},{"key":"e_1_3_2_1_34_1","first-page":"2806","article-title":"Accessibility of Figures in Leading Biomedical and Ophthalmology Journals: Analysis of Alternative Text Use","volume":"64","author":"Nguyen Mickey","year":"2023","unstructured":"Mickey Nguyen, Matthew Crane, John Romley, and Yannis\u00a0M Paulus. 2023. Accessibility of Figures in Leading Biomedical and Ophthalmology Journals: Analysis of Alternative Text Use. 
Investigative Ophthalmology & Visual Science 64, 8 (2023), 2806\u20132806.","journal-title":"Investigative Ophthalmology & Visual Science"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3424636.3426903"},{"key":"e_1_3_2_1_36_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. arxiv:2303.08774\u00a0[cs.CL]"},{"key":"e_1_3_2_1_37_1","volume-title":"International conference on machine learning. PMLR, 8748\u20138763","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong\u00a0Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PMLR, 8748\u20138763."},{"key":"e_1_3_2_1_38_1","volume-title":"Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084","author":"Reimers Nils","year":"2019","unstructured":"Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019)."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.5555\/3455716.3455877"},{"key":"e_1_3_2_1_40_1","volume-title":"Hugginggpt: Solving ai tasks with chatgpt and its friends in huggingface. arXiv preprint arXiv:2303.17580","author":"Shen Yongliang","year":"2023","unstructured":"Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yueting Zhuang. 2023. Hugginggpt: Solving ai tasks with chatgpt and its friends in huggingface. arXiv preprint arXiv:2303.17580 (2023)."},{"key":"e_1_3_2_1_41_1","volume-title":"Beyond Summarization: Designing AI Support for Real-World Expository Writing Tasks. arXiv preprint arXiv:2304.02623","author":"Shen Zejiang","year":"2023","unstructured":"Zejiang Shen, Tal August, Pao Siangliulue, Kyle Lo, Jonathan Bragg, Jeff Hammerbacher, Doug Downey, Joseph\u00a0Chee Chang, and David Sontag. 2023. Beyond Summarization: Designing AI Support for Real-World Expository Writing Tasks. arXiv preprint arXiv:2304.02623 (2023)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1145\/3511599"},{"key":"e_1_3_2_1_43_1","volume-title":"Vipergpt: Visual inference via python execution for reasoning. arXiv preprint arXiv:2303.08128","author":"Sur\u00eds D\u00eddac","year":"2023","unstructured":"D\u00eddac Sur\u00eds, Sachit Menon, and Carl Vondrick. 2023. Vipergpt: Visual inference via python execution for reasoning. arXiv preprint arXiv:2303.08128 (2023)."},{"key":"e_1_3_2_1_44_1","volume-title":"VisText: A Benchmark for Semantically Rich Chart Captioning. arXiv preprint arXiv:2307.05356","author":"Tang J","year":"2023","unstructured":"Benny\u00a0J Tang, Angie Boggust, and Arvind Satyanarayan. 2023. VisText: A Benchmark for Semantically Rich Chart Captioning. arXiv preprint arXiv:2307.05356 (2023)."},{"key":"e_1_3_2_1_45_1","volume-title":"Caption anything: Interactive image description with diverse multimodal controls. arXiv preprint arXiv:2305.02677","author":"Wang Teng","year":"2023","unstructured":"Teng Wang, Jinrui Zhang, Junjie Fei, Yixiao Ge, Hao Zheng, Yunlong Tang, Zhe Li, Mingqi Gao, Shanshan Zhao, Ying Shan, 2023. Caption anything: Interactive image description with diverse multimodal controls. 
arXiv preprint arXiv:2305.02677 (2023)."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/3493612.3520449"},{"key":"e_1_3_2_1_47_1","volume-title":"Visual chatgpt: Talking, drawing and editing with visual foundation models. arXiv preprint arXiv:2303.04671","author":"Wu Chenfei","year":"2023","unstructured":"Chenfei Wu, Shengming Yin, Weizhen Qi, Xiaodong Wang, Zecheng Tang, and Nan Duan. 2023. Visual chatgpt: Talking, drawing and editing with visual foundation models. arXiv preprint arXiv:2303.04671 (2023)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/2998181.2998364"},{"key":"e_1_3_2_1_49_1","volume-title":"Exploring the limits of chatgpt for query or aspect-based text summarization. arXiv preprint arXiv:2302.08081","author":"Yang Xianjun","year":"2023","unstructured":"Xianjun Yang, Yan Li, Xinlu Zhang, Haifeng Chen, and Wei Cheng. 2023. Exploring the limits of chatgpt for query or aspect-based text summarization. arXiv preprint arXiv:2302.08081 (2023)."},{"key":"e_1_3_2_1_50_1","volume-title":"A Knowledge Augmented Dataset to Study the Challenges of Scientific Figure Captioning. arXiv preprint arXiv:2306.03491","author":"Yang Zhishen","year":"2023","unstructured":"Zhishen Yang, Raj Dabre, Hideki Tanaka, and Naoaki Okazaki. 2023. SciCap+: A Knowledge Augmented Dataset to Study the Challenges of Scientific Figure Captioning. arXiv preprint arXiv:2306.03491 (2023)."},{"key":"e_1_3_2_1_51_1","volume-title":"Mm-react: Prompting chatgpt for multimodal reasoning and action. arXiv preprint arXiv:2303.11381","author":"Yang Zhengyuan","year":"2023","unstructured":"Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. arXiv preprint arXiv:2303.11381 (2023)."},{"key":"e_1_3_2_1_52_1","volume-title":"IdealGPT: Iteratively Decomposing Vision and Language Reasoning via Large Language Models. arXiv preprint arXiv:2305.14985","author":"You Haoxuan","year":"2023","unstructured":"Haoxuan You, Rui Sun, Zhecan Wang, Long Chen, Gengyu Wang, Hammad\u00a0A Ayyubi, Kai-Wei Chang, and Shih-Fu Chang. 2023. IdealGPT: Iteratively Decomposing Vision and Language Reasoning via Large Language Models. arXiv preprint arXiv:2305.14985 (2023)."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/3490099.3511105"},{"key":"e_1_3_2_1_54_1","volume-title":"Benchmarking large language models for news summarization. arXiv preprint arXiv:2301.13848","author":"Zhang Tianyi","year":"2023","unstructured":"Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori\u00a0B Hashimoto. 2023. Benchmarking large language models for news summarization. arXiv preprint arXiv:2301.13848 (2023)."},{"key":"e_1_3_2_1_55_1","volume-title":"Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. arXiv preprint arXiv:2303.06594","author":"Zhu Deyao","year":"2023","unstructured":"Deyao Zhu, Jun Chen, Kilichbek Haydarov, Xiaoqian Shen, Wenxuan Zhang, and Mohamed Elhoseiny. 2023. Chatgpt asks, blip-2 answers: Automatic questioning towards enriched visual descriptions. 
arXiv preprint arXiv:2303.06594 (2023)."}],"event":{"name":"IUI '24: 29th International Conference on Intelligent User Interfaces","location":"Greenville SC USA","acronym":"IUI '24","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Proceedings of the 29th International Conference on Intelligent User Interfaces"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3640543.3645212","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3640543.3645212","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:57:34Z","timestamp":1764550654000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3640543.3645212"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,18]]},"references-count":55,"alternative-id":["10.1145\/3640543.3645212","10.1145\/3640543"],"URL":"https:\/\/doi.org\/10.1145\/3640543.3645212","relation":{},"subject":[],"published":{"date-parts":[[2024,3,18]]},"assertion":[{"value":"2024-04-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
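
The object above is a Crossref REST API "work" message for DOI 10.1145/3640543.3645212, retrievable at https://api.crossref.org/works/{DOI}. A minimal sketch of fetching and reading such a record follows, using only the Python standard library; the script name and mailto contact in the User-Agent header are illustrative placeholders, not part of the record.

import json
from urllib.request import Request, urlopen

DOI = "10.1145/3640543.3645212"  # the DOI of the record above
url = f"https://api.crossref.org/works/{DOI}"

# Crossref asks clients to identify themselves; this contact address
# is a hypothetical placeholder, not an address from the record.
req = Request(url, headers={"User-Agent": "crossref-sketch/0.1 (mailto:you@example.org)"})
with urlopen(req) as resp:
    record = json.load(resp)

# The envelope mirrors the structure shown above: status, message-type,
# and a "message" object holding the bibliographic fields.
assert record["status"] == "ok" and record["message-type"] == "work"
msg = record["message"]

print(msg["title"][0])                       # FigurA11y: AI Assistance for Writing Scientific Alt Text
print(msg["DOI"], msg["type"], msg["page"])  # 10.1145/3640543.3645212 proceedings-article 886-906
for a in msg["author"]:                      # given/family names as deposited
    print(a["given"], a["family"])
print("cited references:", msg["references-count"])

Note that fields such as "reference", "license", and "abstract" vary by record, so a robust consumer would read them with msg.get(...) and a default rather than direct indexing.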