{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T17:01:52Z","timestamp":1771952512362,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":58,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,11]],"date-time":"2024-05-11T00:00:00Z","timestamp":1715385600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Washington NASA Space Grant Consortium"},{"DOI":"10.13039\/100008536","name":"Amazon Web Services","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100008536","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100006785","name":"Google","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006785","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,11]]},"DOI":"10.1145\/3613905.3650764","type":"proceedings-article","created":{"date-parts":[[2024,5,11]],"date-time":"2024-05-11T08:15:21Z","timestamp":1715415321000},"page":"1-9","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":20,"title":["Exploring and Characterizing Large Language Models for Embedded System Development and Debugging"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6646-6466","authenticated-orcid":false,"given":"Zachary","family":"Englhardt","sequence":"first","affiliation":[{"name":"Paul G. 
Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0192-3371","authenticated-orcid":false,"given":"Richard","family":"Li","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-0649-9112","authenticated-orcid":false,"given":"Dilini","family":"Nissanka","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7394-5409","authenticated-orcid":false,"given":"Zhihan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5647-3577","authenticated-orcid":false,"given":"Girish","family":"Narayanswamy","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2795-7334","authenticated-orcid":false,"given":"Joseph","family":"Breda","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9279-5386","authenticated-orcid":false,"given":"Xin","family":"Liu","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6300-4389","authenticated-orcid":false,"given":"Shwetak","family":"Patel","sequence":"additional","affiliation":[{"name":"Paul G. 
Allen School of Computer Science &amp; Engineering, University of Washington, United States and Department of Electrical and Computer Engineering, University of Washington, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3025-7953","authenticated-orcid":false,"given":"Vikram","family":"Iyer","sequence":"additional","affiliation":[{"name":"Paul G. Allen School of Computer Science &amp; Engineering, University of Washington, United States"}]}],"member":"320","published-online":{"date-parts":[[2024,5,11]]},"reference":[{"key":"e_1_3_3_3_1_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74048-3_4"},{"key":"e_1_3_3_3_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3358711.3361630"},{"key":"e_1_3_3_3_3_1","volume-title":"International Workshop on Service Orientation in Holonic and Multi-Agent Manufacturing. Springer, 254\u2013266","author":"Brennan W","year":"2022","unstructured":"Robert\u00a0W Brennan and Jonathan Lesage. 2022. Exploring the Implications of Openai Codex on Education for Industry 4.0. In International Workshop on Service Orientation in Holonic and Multi-Agent Manufacturing. Springer, 254\u2013266."},{"key":"e_1_3_3_3_4_1","volume-title":"Language models are few-shot learners. Advances in neural information processing systems 33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared\u00a0D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877\u20131901."},{"key":"e_1_3_3_3_5_1","volume-title":"Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712","author":"Bubeck S\u00e9bastien","year":"2023","unstructured":"S\u00e9bastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin\u00a0Tat Lee, Yuanzhi Li, Scott Lundberg, 2023. 
Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712 (2023)."},{"key":"e_1_3_3_3_6_1","volume-title":"Improving code generation by training with natural language feedback. arXiv preprint arXiv:2303.16749","author":"Chen Angelica","year":"2023","unstructured":"Angelica Chen, J\u00e9r\u00e9my Scheurer, Tomasz Korbak, Jon\u00a0Ander Campos, Jun\u00a0Shern Chan, Samuel\u00a0R Bowman, Kyunghyun Cho, and Ethan Perez. 2023. Improving code generation by training with natural language feedback. arXiv preprint arXiv:2303.16749 (2023)."},{"key":"e_1_3_3_3_7_1","volume-title":"Jared Kaplan, Harri Edwards, Yuri Burda","author":"Chen Mark","year":"2021","unstructured":"Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de\u00a0Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, 2021. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374 (2021)."},{"key":"e_1_3_3_3_8_1","volume-title":"Teaching large language models to self-debug. arXiv preprint arXiv:2304.05128","author":"Chen Xinyun","year":"2023","unstructured":"Xinyun Chen, Maxwell Lin, Nathanael Sch\u00e4rli, and Denny Zhou. 2023. Teaching large language models to self-debug. arXiv preprint arXiv:2304.05128 (2023)."},{"key":"e_1_3_3_3_9_1","volume-title":"Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung\u00a0Won Chung, Charles Sutton, Sebastian Gehrmann, 2022. Palm: Scaling language modeling with pathways. 
arXiv preprint arXiv:2204.02311 (2022)."},{"key":"e_1_3_3_3_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3299710.3211335"},{"key":"e_1_3_3_3_11_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysarc.2019.05.005"},{"key":"e_1_3_3_3_12_1","volume-title":"International conference on machine learning. PMLR, 990\u2013998","author":"Devlin Jacob","year":"2017","unstructured":"Jacob Devlin, Jonathan Uesato, Surya Bhupatiraju, Rishabh Singh, Abdel-rahman Mohamed, and Pushmeet Kohli. 2017. Robustfill: Neural program learning under noisy i\/o. In International conference on machine learning. PMLR, 990\u2013998."},{"key":"e_1_3_3_3_13_1","unstructured":"Yihong Dong Xue Jiang Zhi Jin and Ge Li. 2023. Self-collaboration Code Generation via ChatGPT. arxiv:2304.07590\u00a0[cs.SE]"},{"key":"e_1_3_3_3_14_1","volume-title":"The SPACE of Developer Productivity: There\u2019s more to it than you think.Queue 19, 1","author":"Forsgren Nicole","year":"2021","unstructured":"Nicole Forsgren, Margaret-Anne Storey, Chandra Maddila, Thomas Zimmermann, Brian Houck, and Jenna Butler. 2021. The SPACE of Developer Productivity: There\u2019s more to it than you think.Queue 19, 1 (2021), 20\u201348."},{"key":"e_1_3_3_3_15_1","doi-asserted-by":"crossref","unstructured":"Rohit Girdhar Alaaeldin El-Nouby Zhuang Liu Mannat Singh Kalyan\u00a0Vasudev Alwala Armand Joulin and Ishan Misra. 2023. ImageBind: One Embedding Space To Bind Them All. arxiv:2305.05665\u00a0[cs.CV]","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"e_1_3_3_3_16_1","unstructured":"Suriya Gunasekar Yi Zhang Jyoti Aneja Caio C\u00e9sar\u00a0Teodoro Mendes Allie\u00a0Del Giorno Sivakanth Gopi Mojan Javaheripi Piero Kauffmann Gustavo de Rosa Olli Saarikivi Adil Salim Shital Shah Harkirat\u00a0Singh Behl Xin Wang S\u00e9bastien Bubeck Ronen Eldan Adam\u00a0Tauman Kalai Yin\u00a0Tat Lee and Yuanzhi Li. 2023. Textbooks Are All You Need. 
arxiv:2306.11644\u00a0[cs.CL]"},{"key":"e_1_3_3_3_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/1369396.1369402"},{"key":"e_1_3_3_3_18_1","unstructured":"Sandra\u00a0G Hart. 1986. NASA task load index (TLX). (1986)."},{"key":"e_1_3_3_3_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3510003.3510203"},{"key":"e_1_3_3_3_20_1","unstructured":"Kimmo Karvinen 2019. Lowering barriers on embedded system design-Turning innovations into prototypes. (2019)."},{"key":"e_1_3_3_3_21_1","volume-title":"Language models can solve computer tasks. arXiv preprint arXiv:2303.17491","author":"Kim Geunwoo","year":"2023","unstructured":"Geunwoo Kim, Pierre Baldi, and Stephen McAleer. 2023. Language models can solve computer tasks. arXiv preprint arXiv:2303.17491 (2023)."},{"key":"e_1_3_3_3_22_1","volume-title":"d.]. Large language models are zero-shot reasoners","author":"Kojima Takeshi","year":"2022","unstructured":"Takeshi Kojima, Shixiang\u00a0Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. [n. d.]. Large language models are zero-shot reasoners, 2022. URL https:\/\/arxiv. org\/abs\/2205.11916 ([n. d.])."},{"key":"e_1_3_3_3_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/1086519.1086522"},{"key":"e_1_3_3_3_24_1","volume-title":"Interactive Code Generation via Test-Driven User-Intent Formalization. arXiv preprint arXiv:2208.05950","author":"Lahiri K","year":"2022","unstructured":"Shuvendu\u00a0K Lahiri, Aaditya Naik, Georgios Sakkas, Piali Choudhury, Curtis von Veh, Madanlal Musuvathi, Jeevana\u00a0Priya Inala, Chenglong Wang, and Jianfeng Gao. 2022. Interactive Code Generation via Test-Driven User-Intent Formalization. arXiv preprint arXiv:2208.05950 (2022)."},{"key":"e_1_3_3_3_25_1","volume-title":"Introduction to embedded systems. A cyber-physical systems approach 499","author":"Lee A","year":"2011","unstructured":"Edward\u00a0A Lee, Sanjit\u00a0A Seshia, 2011. Introduction to embedded systems. 
A cyber-physical systems approach 499 (2011)."},{"key":"e_1_3_3_3_26_1","volume-title":"Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. arXiv preprint arXiv:2305.01210","author":"Liu Jiawei","year":"2023","unstructured":"Jiawei Liu, Chunqiu\u00a0Steven Xia, Yuyao Wang, and Lingming Zhang. 2023. Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. arXiv preprint arXiv:2305.01210 (2023)."},{"key":"e_1_3_3_3_27_1","volume-title":"ChipNeMo: Domain-Adapted LLMs for Chip Design. arXiv preprint arXiv:2311.00176","author":"Liu Mingjie","year":"2023","unstructured":"Mingjie Liu, Teo Ene, Robert Kirby, Chris Cheng, Nathaniel Pinckney, Rongjian Liang, Jonah Alben, Himyanshu Anand, Sanmitra Banerjee, Ismet Bayraktaroglu, 2023. ChipNeMo: Domain-Adapted LLMs for Chip Design. arXiv preprint arXiv:2311.00176 (2023)."},{"key":"e_1_3_3_3_28_1","volume-title":"Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525","author":"Liu Xin","year":"2023","unstructured":"Xin Liu, Daniel McDuff, Geza Kovacs, Isaac Galatzer-Levy, Jacob Sunshine, Jiening Zhan, Ming-Zher Poh, Shun Liao, Paolo Di\u00a0Achille, and Shwetak Patel. 2023. Large Language Models are Few-Shot Health Learners. arXiv preprint arXiv:2305.15525 (2023)."},{"key":"e_1_3_3_3_29_1","volume-title":"2016 International conference on field-programmable technology (FPT). IEEE, 61\u201368","author":"Liu Zhiqiang","year":"2016","unstructured":"Zhiqiang Liu, Yong Dou, Jingfei Jiang, and Jinwei Xu. 2016. Automatic code generation of convolutional neural networks in FPGA implementation. In 2016 International conference on field-programmable technology (FPT). 
IEEE, 61\u201368."},{"key":"e_1_3_3_3_30_1","doi-asserted-by":"publisher","DOI":"10.1109\/INDIN.2010.5549590"},{"key":"e_1_3_3_3_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3524842.3528470"},{"key":"e_1_3_3_3_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3524842.3528470"},{"key":"e_1_3_3_3_33_1","volume-title":"Codegen2: Lessons for training llms on programming and natural languages. arXiv preprint arXiv:2305.02309","author":"Nijkamp Erik","year":"2023","unstructured":"Erik Nijkamp, Hiroaki Hayashi, Caiming Xiong, Silvio Savarese, and Yingbo Zhou. 2023. Codegen2: Lessons for training llms on programming and natural languages. arXiv preprint arXiv:2305.02309 (2023)."},{"key":"e_1_3_3_3_34_1","volume-title":"Codegen: An open large language model for code with multi-turn program synthesis. arXiv preprint arXiv:2203.13474","author":"Nijkamp Erik","year":"2022","unstructured":"Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, and Caiming Xiong. 2022. Codegen: An open large language model for code with multi-turn program synthesis. arXiv preprint arXiv:2203.13474 (2022)."},{"key":"e_1_3_3_3_35_1","unstructured":"Theo\u00a0X. Olausson Jeevana\u00a0Priya Inala Chenglong Wang Jianfeng Gao and Armando Solar-Lezama. 2023. Demystifying GPT Self-Repair for Code Generation. arxiv:2306.09896\u00a0[cs.CL]"},{"key":"e_1_3_3_3_37_1","volume-title":"Proceedings of the Great Lakes Symposium on VLSI","author":"Pasricha Sudeep","year":"2022","unstructured":"Sudeep Pasricha. 2022. Embedded Systems Education in the 2020s: Challenges, Reflections, and Future Directions. In Proceedings of the Great Lakes Symposium on VLSI 2022. 519\u2013524."},{"key":"e_1_3_3_3_38_1","volume-title":"Synchromesh: Reliable code generation from pre-trained language models. 
arXiv preprint arXiv:2201.11227","author":"Poesia Gabriel","year":"2022","unstructured":"Gabriel Poesia, Oleksandr Polozov, Vu Le, Ashish Tiwari, Gustavo Soares, Christopher Meek, and Sumit Gulwani. 2022. Synchromesh: Reliable code generation from pre-trained language models. arXiv preprint arXiv:2201.11227 (2022)."},{"key":"e_1_3_3_3_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3617367"},{"key":"e_1_3_3_3_40_1","unstructured":"Puneeth. 2023. I tried ChatGPT for Arduino - It\u2019s Surprising. https:\/\/blog.wokwi.com\/learn-arduino-using-ai-chatgpt\/"},{"key":"e_1_3_3_3_41_1","doi-asserted-by":"publisher","DOI":"10.1021\/acs.est.3c01106"},{"key":"e_1_3_3_3_42_1","unstructured":"CJS Robotics. 2023. Duino Code Generator. https:\/\/www.duinocodegenerator.com\/"},{"key":"e_1_3_3_3_43_1","volume-title":"Reflexion: an autonomous agent with dynamic memory and self-reflection. arXiv preprint arXiv:2303.11366","author":"Shinn Noah","year":"2023","unstructured":"Noah Shinn, Beck Labash, and Ashwin Gopinath. 2023. Reflexion: an autonomous agent with dynamic memory and self-reflection. arXiv preprint arXiv:2303.11366 (2023)."},{"key":"e_1_3_3_3_44_1","volume-title":"Towards expert-level medical question answering with large language models. arXiv preprint arXiv:2305.09617","author":"Singhal Karan","year":"2023","unstructured":"Karan Singhal, Tao Tu, Juraj Gottweis, Rory Sayres, Ellery Wulczyn, Le Hou, Kevin Clark, Stephen Pfohl, Heather Cole-Lewis, Darlene Neal, 2023. Towards expert-level medical question answering with large language models. arXiv preprint arXiv:2305.09617 (2023)."},{"key":"e_1_3_3_3_45_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, 2023. 
Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_3_3_46_1","volume-title":"Attention is all you need. Advances in neural information processing systems 30","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan\u00a0N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_3_3_47_1","volume-title":"Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems 32","author":"Wang Alex","year":"2019","unstructured":"Alex Wang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2019. Superglue: A stickier benchmark for general-purpose language understanding systems. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_3_3_48_1","volume-title":"Aakanksha Chowdhery, and Denny Zhou.","author":"Wang Xuezhi","year":"2022","unstructured":"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2022. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171 (2022)."},{"key":"e_1_3_3_3_49_1","volume-title":"Chi, Quoc Le, and Denny Zhou","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903 (2022)."},{"key":"e_1_3_3_3_50_1","volume-title":"A prompt pattern catalog to enhance prompt engineering with chatgpt. 
arXiv preprint arXiv:2302.11382","author":"White Jules","year":"2023","unstructured":"Jules White, Quchen Fu, Sam Hays, Michael Sandborn, Carlos Olea, Henry Gilbert, Ashraf Elnashar, Jesse Spencer-Smith, and Douglas\u00a0C Schmidt. 2023. A prompt pattern catalog to enhance prompt engineering with chatgpt. arXiv preprint arXiv:2302.11382 (2023)."},{"key":"e_1_3_3_3_51_1","volume-title":"High-performance embedded computing: architectures, applications, and methodologies","author":"Wolf Wayne","unstructured":"Wayne Wolf. 2010. High-performance embedded computing: architectures, applications, and methodologies. Elsevier."},{"key":"e_1_3_3_3_52_1","volume-title":"Workshop.","year":"2023","unstructured":"DroneBot Workshop. 2023. Using ChatGPT to Write Code for Arduino and ESP32. https:\/\/dronebotworkshop.com\/chatgpt\/"},{"key":"e_1_3_3_3_53_1","volume-title":"Bloomberggpt: A large language model for finance. arXiv preprint arXiv:2303.17564","author":"Wu Shijie","year":"2023","unstructured":"Shijie Wu, Ozan Irsoy, Steven Lu, Vadim Dabravolski, Mark Dredze, Sebastian Gehrmann, Prabhanjan Kambadur, David Rosenberg, and Gideon Mann. 2023. Bloomberggpt: A large language model for finance. arXiv preprint arXiv:2303.17564 (2023)."},{"key":"e_1_3_3_3_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3520312.3534862"},{"key":"e_1_3_3_3_55_1","volume-title":"Tree of thoughts: Deliberate problem solving with large language models. arXiv preprint arXiv:2305.10601","author":"Yao Shunyu","year":"2023","unstructured":"Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas\u00a0L Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. arXiv preprint arXiv:2305.10601 (2023)."},{"key":"e_1_3_3_3_56_1","volume-title":"React: Synergizing reasoning and acting in language models. 
arXiv preprint arXiv:2210.03629","author":"Yao Shunyu","year":"2022","unstructured":"Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2022. React: Synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022)."},{"key":"e_1_3_3_3_57_1","doi-asserted-by":"crossref","unstructured":"Hang Zhang Xin Li and Lidong Bing. 2023. Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding. arxiv:2306.02858\u00a0[cs.CL]","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"e_1_3_3_3_58_1","volume-title":"Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910","author":"Zhou Yongchao","year":"2022","unstructured":"Yongchao Zhou, Andrei\u00a0Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2022. Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910 (2022)."},{"key":"e_1_3_3_3_59_1","doi-asserted-by":"publisher","DOI":"10.1145\/3520312.3534864"}],"event":{"name":"CHI '24: CHI Conference on Human Factors in Computing Systems","location":"Honolulu HI USA","acronym":"CHI '24","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction","SIGACCESS ACM Special Interest Group on Accessible Computing"]},"container-title":["Extended Abstracts of the CHI Conference on Human Factors in Computing 
Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3613905.3650764","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3613905.3650764","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T23:44:16Z","timestamp":1750290256000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3613905.3650764"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,11]]},"references-count":58,"alternative-id":["10.1145\/3613905.3650764","10.1145\/3613905"],"URL":"https:\/\/doi.org\/10.1145\/3613905.3650764","relation":{},"subject":[],"published":{"date-parts":[[2024,5,11]]},"assertion":[{"value":"2024-05-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}