{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T17:24:42Z","timestamp":1776792282007,"version":"3.51.2"},"publisher-location":"New York, NY, USA","reference-count":53,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T00:00:00Z","timestamp":1710720000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"NSERC Canada","award":["RGPIN-2020-06432"],"award-info":[{"award-number":["RGPIN-2020-06432"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,3,18]]},"DOI":"10.1145\/3640543.3645200","type":"proceedings-article","created":{"date-parts":[[2024,4,5]],"date-time":"2024-04-05T18:23:12Z","timestamp":1712341392000},"page":"288-303","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":54,"title":["Why and When LLM-Based Assistants Can Go Wrong: Investigating the Effectiveness of Prompt-Based Interactions for Software Help-Seeking"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2730-5512","authenticated-orcid":false,"given":"Anjali","family":"Khurana","sequence":"first","affiliation":[{"name":"Computing Science, Simon Fraser University, Canada"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3450-0447","authenticated-orcid":false,"given":"Hariharan","family":"Subramonyam","sequence":"additional","affiliation":[{"name":"Stanford University, United States"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-0173-1752","authenticated-orcid":false,"given":"Parmit K","family":"Chilana","sequence":"additional","affiliation":[{"name":"Computing Science, Simon Fraser University, 
Canada"}]}],"member":"320","published-online":{"date-parts":[[2024,4,5]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Adobe. 2023. Adobe unveils Firefly a family of new creative generative ai. https:\/\/news.adobe.com\/news\/news-details\/2023\/Adobe-Unveils-Firefly-a-Family-of-new-Creative-Generative-AI\/default.aspx"},{"key":"e_1_3_2_1_2_1","unstructured":"Open AI. 2022. Introducing chatgpt. https:\/\/openai.com\/blog\/chatgpt"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/1621995.1622022"},{"key":"e_1_3_2_1_4_1","volume-title":"A multitask, multilingual, multimodal evaluation of chatgpt on reasoning, hallucination, and interactivity. arXiv preprint arXiv:2302.04023","author":"Bang Yejin","year":"2023","unstructured":"Yejin Bang, Samuel Cahyawijaya, Nayeon Lee, Wenliang Dai, Dan Su, Bryan Wilie, Holy Lovenia, Ziwei Ji, Tiezheng Yu, Willy Chung, 2023. A multitask, multilingual, multimodal evaluation of chatgpt on reasoning, hallucination, and interactivity. arXiv preprint arXiv:2302.04023 (2023)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3586030"},{"key":"e_1_3_2_1_6_1","volume-title":"A categorical archive of chatgpt failures. arXiv preprint arXiv:2302.03494","author":"Borji Ali","year":"2023","unstructured":"Ali Borji. 2023. A categorical archive of chatgpt failures. arXiv preprint arXiv:2302.03494 (2023)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/1753326.1753402"},{"key":"e_1_3_2_1_8_1","volume-title":"Advances in Neural Information Processing Systems, H.\u00a0Larochelle, M.\u00a0Ranzato, R.\u00a0Hadsell, M.F. Balcan, and H.\u00a0Lin (Eds.). Vol.\u00a033. 
Curran Associates","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared\u00a0D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. In Advances in Neural Information Processing Systems, H.\u00a0Larochelle, M.\u00a0Ranzato, R.\u00a0Hadsell, M.F. Balcan, and H.\u00a0Lin (Eds.). Vol.\u00a033. Curran Associates, Inc., 1877\u20131901. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2020\/file\/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3301275.3302289"},{"key":"e_1_3_2_1_10_1","volume-title":"Carroll and Mary\u00a0Beth Rosson","author":"M.","year":"1987","unstructured":"John\u00a0M. Carroll and Mary\u00a0Beth Rosson. 1987. Paradox of the Active User. MIT Press, Cambridge, MA, USA, 80\u2013111."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/2207676.2208620"},{"key":"e_1_3_2_1_12_1","volume-title":"Grounded theory research: Procedures, canons, and evaluative criteria. Qualitative sociology 13, 1","author":"Corbin M","year":"1990","unstructured":"Juliet\u00a0M Corbin and Anselm Strauss. 1990. Grounded theory research: Procedures, canons, and evaluative criteria. Qualitative sociology 13, 1 (1990), 3\u201321."},{"key":"e_1_3_2_1_13_1","unstructured":"Raymond Fok and Daniel\u00a0S Weld. 2023. In Search of Verifiability: Explanations Rarely Enable Complementary Performance in AI-Advised Decision Making. 
arXiv preprint arXiv:2305.07722 (2023)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/2642918.2647420"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3379337.3415592"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/32206.32212"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/1753326.1753552"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/1518701.1518803"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/1753326.1753478"},{"key":"e_1_3_2_1_20_1","unstructured":"Samantha\u00a0Murphy Kelly. 2023. 5 jaw-dropping things GPT-4 can do that chatgpt couldn\u2019t | CNN business. https:\/\/www.cnn.com\/2023\/03\/16\/tech\/gpt-4-use-cases\/index.html"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/VL\/HCC51201.2021.9576440"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3290605.3300570"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/2556288.2556986"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/2807442.2807482"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1145\/2470654.2466235"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.5555\/3495724.3496517"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3379337.3415820"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/VLHCC.2018.8506506"},{"key":"e_1_3_2_1_29_1","volume-title":"AI Transparency in the Age of LLMs: A Human-Centered Research Roadmap. arXiv preprint arXiv:2306.01941","author":"Liao Q\u00a0Vera","year":"2023","unstructured":"Q\u00a0Vera Liao and Jennifer\u00a0Wortman Vaughan. 2023. AI Transparency in the Age of LLMs: A Human-Centered Research Roadmap. 
arXiv preprint arXiv:2306.01941 (2023)."},{"key":"e_1_3_2_1_30_1","volume-title":"Advances in Neural Information Processing Systems, S.\u00a0Koyejo, S.\u00a0Mohamed, A.\u00a0Agarwal, D.\u00a0Belgrave, K.\u00a0Cho, and A.\u00a0Oh (Eds.). Vol.\u00a035. Curran Associates","author":"Liu Haokun","year":"2022","unstructured":"Haokun Liu, Derek Tam, Mohammed Muqeeth, Jay Mohta, Tenghao Huang, Mohit Bansal, and Colin\u00a0A Raffel. 2022. Few-Shot Parameter-Efficient Fine-Tuning is Better and Cheaper than In-Context Learning. In Advances in Neural Information Processing Systems, S.\u00a0Koyejo, S.\u00a0Mohamed, A.\u00a0Agarwal, D.\u00a0Belgrave, K.\u00a0Cho, and A.\u00a0Oh (Eds.). Vol.\u00a035. Curran Associates, Inc., 1950\u20131965. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/0cde695b83bd186c1fd456302888454c-Paper-Conference.pdf"},{"key":"e_1_3_2_1_31_1","unstructured":"HARRY MCCRACKEN. 2023. Microsoft\u2019s Satya Nadella is winning Big Tech\u2019s AI War. here\u2019s how. https:\/\/www.fastcompany.com\/90931084\/satya-nadella-microsoft-ai-frontrunner"},{"key":"e_1_3_2_1_32_1","volume-title":"MTEB: Massive text embedding benchmark. arXiv preprint arXiv:2210.07316","author":"Muennighoff Niklas","year":"2022","unstructured":"Niklas Muennighoff, Nouamane Tazi, Lo\u00efc Magne, and Nils Reimers. 2022. MTEB: Massive text embedding benchmark. arXiv preprint arXiv:2210.07316 (2022)."},{"key":"e_1_3_2_1_33_1","unstructured":"Don Norman. 2013. The design of everyday things: Revised and expanded edition. Basic books."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/1621995.1622014"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/1456536.1456538"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/1166324.1166329"},{"key":"e_1_3_2_1_37_1","unstructured":"Samir Passi and Mihaela Vorvoreanu. 2022. Overreliance on AI: Literature Review. Technical Report MSR-TR-2022-12. Microsoft. 
https:\/\/www.microsoft.com\/en-us\/research\/publication\/overreliance-on-ai-literature-review\/"},{"key":"e_1_3_2_1_38_1","volume-title":"Companion Encyclopedia of Psychology","author":"Raulin L","unstructured":"Michael\u00a0L Raulin and Anthony\u00a0M Graziano. 2019. Quasi-experiments and correlational studies. In Companion Encyclopedia of Psychology. Routledge, 1122\u20131141."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/105783.105788"},{"key":"e_1_3_2_1_40_1","unstructured":"Ted Sanders. 2023. How to format inputs to CHATGPT models. https:\/\/cookbook.openai.com\/examples\/how_to_format_inputs_to_chatgpt_models"},{"key":"e_1_3_2_1_41_1","volume-title":"What is it like to program with artificial intelligence?arXiv preprint arXiv:2208.06213","author":"Sarkar Advait","year":"2022","unstructured":"Advait Sarkar, Andrew\u00a0D Gordon, Carina Negreanu, Christian Poelitz, Sruti\u00a0Srinivasa Ragavan, and Ben Zorn. 2022. What is it like to program with artificial intelligence?arXiv preprint arXiv:2208.06213 (2022)."},{"key":"e_1_3_2_1_42_1","unstructured":"Jessica Shieh. 2023. Best practices for prompt engineering with openai API: Openai help center. https:\/\/help.openai.com\/en\/articles\/6654000-best-practices-for-prompt-engineering-with-openai-api"},{"key":"e_1_3_2_1_43_1","unstructured":"Jared Spataro. 2023. Introducing Microsoft 365 copilot \u2013 your copilot for work. https:\/\/blogs.microsoft.com\/blog\/2023\/03\/16\/introducing-microsoft-365-copilot-your-copilot-for-work\/"},{"key":"e_1_3_2_1_44_1","unstructured":"Suhridpalsule. 2023. Prompt engineering techniques with Azure Openai - Azure Openai Service. https:\/\/learn.microsoft.com\/en-us\/azure\/ai-services\/openai\/concepts\/advanced-prompt-engineering?pivots=programming-language-chat-completions"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3490099.3511119"},{"key":"e_1_3_2_1_46_1","unstructured":"[46] Gradio Team. 2023. 
https:\/\/www.gradio.app\/"},{"key":"e_1_3_2_1_47_1","volume-title":"Is ChatGPT the Ultimate Programming Assistant\u2013How far is it?arXiv preprint arXiv:2304.11938","author":"Tian Haoye","year":"2023","unstructured":"Haoye Tian, Weiqi Lu, Tsz\u00a0On Li, Xunzhu Tang, Shing-Chi Cheung, Jacques Klein, and Tegawend\u00e9\u00a0F Bissyand\u00e9. 2023. Is ChatGPT the Ultimate Programming Assistant\u2013How far is it?arXiv preprint arXiv:2304.11938 (2023)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1145\/3491101.3519665"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3579605"},{"key":"e_1_3_2_1_50_1","volume-title":"A prompt pattern catalog to enhance prompt engineering with chatgpt. arXiv preprint arXiv:2302.11382","author":"White Jules","year":"2023","unstructured":"Jules White, Quchen Fu, Sam Hays, Michael Sandborn, Carlos Olea, Henry Gilbert, Ashraf Elnashar, Jesse Spencer-Smith, and Douglas\u00a0C Schmidt. 2023. A prompt pattern catalog to enhance prompt engineering with chatgpt. arXiv preprint arXiv:2302.11382 (2023)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/3487569"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1145\/3544548.3581388"},{"key":"e_1_3_2_1_53_1","volume-title":"Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792","author":"Zhang Shengyu","year":"2023","unstructured":"Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, 2023. Instruction tuning for large language models: A survey. 
arXiv preprint arXiv:2308.10792 (2023)."}],"event":{"name":"IUI '24: 29th International Conference on Intelligent User Interfaces","location":"Greenville SC USA","acronym":"IUI '24","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Proceedings of the 29th International Conference on Intelligent User Interfaces"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3640543.3645200","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3640543.3645200","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:59:17Z","timestamp":1764550757000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3640543.3645200"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,18]]},"references-count":53,"alternative-id":["10.1145\/3640543.3645200","10.1145\/3640543"],"URL":"https:\/\/doi.org\/10.1145\/3640543.3645200","relation":{},"subject":[],"published":{"date-parts":[[2024,3,18]]},"assertion":[{"value":"2024-04-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}