{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T20:30:11Z","timestamp":1773520211716,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":11,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,2,18]]},"DOI":"10.1145\/3770761.3777269","type":"proceedings-article","created":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T15:18:26Z","timestamp":1770995906000},"page":"1589-1590","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Integrating Large Language Models with Cybersecurity Education"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9015-0333","authenticated-orcid":false,"given":"Wei","family":"Yan","sequence":"first","affiliation":[{"name":"Northern Arizona University, Flagstaff, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-1564-8400","authenticated-orcid":false,"given":"Soumiki","family":"Chattopadhyay","sequence":"additional","affiliation":[{"name":"Northern Arizona University, Flagstaff, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3964-8034","authenticated-orcid":false,"given":"Lan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Northern Arizona University, Flagstaff, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3722-0720","authenticated-orcid":false,"given":"Ashish","family":"Amresh","sequence":"additional","affiliation":[{"name":"Northern Arizona University, Flagstaff, AZ, USA"}]}],"member":"320","published-online":{"date-parts":[[2026,2,17]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Striving for effective cyber workforce development","author":"Baker Marie","year":"2016","unstructured":"Marie Baker. 2016. Striving for effective cyber workforce development. Software Engineering Institute (2016), 1-26."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.caeai.2023.100172"},{"key":"e_1_3_2_1_3_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020) 1877-1901."},{"key":"e_1_3_2_1_4_1","volume-title":"Application of Large Language Models in Cybersecurity: A Systematic Literature Review","author":"Hasanov Ismayil","year":"2024","unstructured":"Ismayil Hasanov, Seppo Virtanen, Antti Hakkala, and Jouni Isoaho. 2024. Application of Large Language Models in Cybersecurity: A Systematic Literature Review. IEEE Access (2024)."},{"key":"e_1_3_2_1_5_1","volume-title":"Technology Acceptance Model: A Review","author":"Marikyan Davit","unstructured":"Davit Marikyan and Savvas Papagiannidis. 2025. Technology Acceptance Model: A Review. In TheoryHub Book, Savvas Papagiannidis (Ed.). TheoryHub, Newcastle, UK. https:\/\/open.ncl.ac.uk\/"},{"key":"e_1_3_2_1_6_1","volume-title":"Information Security Analysts. https:\/\/www.bls.gov\/ooh\/computer-and-information-technology\/information-security-analysts.htm","author":"U.S. Bureau of Labor Statistics. 2024.","year":"2024","unstructured":"U.S. Bureau of Labor Statistics. 2024. Information Security Analysts. https:\/\/www.bls.gov\/ooh\/computer-and-information-technology\/information-security-analysts.htm (2024)."},{"key":"e_1_3_2_1_7_1","volume-title":"Cybersecurity Recruitment in the US: 5 Key Trends Shaping","author":"Entrepreneur Recruitment","year":"2025","unstructured":"Recruitment Entrepreneur. 2024. Cybersecurity Recruitment in the US: 5 Key Trends Shaping 2025. https:\/\/www.linkedin.com\/pulse\/cybersecurity-recruitment-us-5-key-trends-shaping-jyvdf\/. Accessed: 2025-12-01."},{"key":"e_1_3_2_1_8_1","unstructured":"Software Engineering Institute. 2025. Reverse Engineering for Malware Analysis. https:\/\/www.sei.cmu.edu\/reverse-engineering-for-malware-analysis\/. Accessed: 2025-12-01."},{"key":"e_1_3_2_1_9_1","volume-title":"Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560","author":"Wang Yizhong","year":"2022","unstructured":"Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2022. Self-instruct: Aligning language models with self-generated instructions. arXiv preprint arXiv:2212.10560 (2022)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.chbah.2024.100072"},{"key":"e_1_3_2_1_11_1","volume-title":"International conference on machine learning. PMLR, 12697-12706","author":"Zhao Zihao","year":"2021","unstructured":"Zihao Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. 2021. Calibrate before use: Improving few-shot performance of language models. In International conference on machine learning. PMLR, 12697-12706."}],"event":{"name":"SIGCSE TS 2026: The 57th ACM Technical Symposium on Computer Science Education","location":"St. Louis MO USA","sponsor":["SIGCSE ACM Special Interest Group on Computer Science Education"]},"container-title":["Proceedings of the 57th ACM Technical Symposium on Computer Science Education V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3770761.3777269","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T17:19:14Z","timestamp":1773508754000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3770761.3777269"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,17]]},"references-count":11,"alternative-id":["10.1145\/3770761.3777269","10.1145\/3770761"],"URL":"https:\/\/doi.org\/10.1145\/3770761.3777269","relation":{},"subject":[],"published":{"date-parts":[[2026,2,17]]},"assertion":[{"value":"2026-02-17","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}