{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T05:33:30Z","timestamp":1771479210112,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":34,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,4,30]],"date-time":"2023-04-30T00:00:00Z","timestamp":1682812800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,4,30]]},"DOI":"10.1145\/3543873.3587655","type":"proceedings-article","created":{"date-parts":[[2023,4,28]],"date-time":"2023-04-28T11:36:14Z","timestamp":1682681774000},"page":"1145-1149","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":10,"title":["Decoding Prompt Syntax: Analysing its Impact on Knowledge Retrieval in Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-6955-2368","authenticated-orcid":false,"given":"Stephan","family":"Linzbach","sequence":"first","affiliation":[{"name":"GESIS Leibniz Institut f\u00fcr Sozialwissenschaften, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0167-9201","authenticated-orcid":false,"given":"Tim","family":"Tressel","sequence":"additional","affiliation":[{"name":"Heinrich Heine University, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9691-5990","authenticated-orcid":false,"given":"Laura","family":"Kallmeyer","sequence":"additional","affiliation":[{"name":"Heinrich Heine University, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-4364-9243","authenticated-orcid":false,"given":"Stefan","family":"Dietze","sequence":"additional","affiliation":[{"name":"GESIS Leibniz Institute for Social Sciences, Germany and Heinrich Heine University, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1476-2121","authenticated-orcid":false,"given":"Hajira","family":"Jabeen","sequence":"additional","affiliation":[{"name":"GESIS Leibniz Institute for Social Sciences, Germany"}]}],"member":"320","published-online":{"date-parts":[[2023,4,30]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"What does bert look at? an analysis of bert\u2019s attention. arXiv preprint arXiv:1906.04341","author":"Clark Kevin","year":"2019","unstructured":"Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher\u00a0D Manning. 2019. What does bert look at? an analysis of bert\u2019s attention. arXiv preprint arXiv:1906.04341 (2019)."},{"key":"e_1_3_2_1_2_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00359"},{"key":"e_1_3_2_1_4_1","volume-title":"Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC","author":"Elsahar Hady","year":"2018","unstructured":"Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. 2018. T-rex: A large scale alignment of natural language with knowledge base triples. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)."},{"key":"e_1_3_2_1_5_1","volume-title":"Entities as experts: Sparse memory access with entity supervision. arXiv preprint arXiv:2004.07202","author":"F\u00e9vry Thibault","year":"2020","unstructured":"Thibault F\u00e9vry, Livio\u00a0Baldini Soares, Nicholas FitzGerald, Eunsol Choi, and Tom Kwiatkowski. 2020. Entities as experts: Sparse memory access with entity supervision. arXiv preprint arXiv:2004.07202 (2020)."},{"key":"e_1_3_2_1_6_1","volume-title":"KMIR: A Benchmark for Evaluating Knowledge Memorization, Identification and Reasoning Abilities of Language Models. arXiv preprint arXiv:2202.13529","author":"Gao Daniel","year":"2022","unstructured":"Daniel Gao, Yantao Jia, Lei Li, Chengzhen Fu, Zhicheng Dou, Hao Jiang, Xinyu Zhang, Lei Chen, and Zhao Cao. 2022. KMIR: A Benchmark for Evaluating Knowledge Memorization, Identification and Reasoning Abilities of Language Models. arXiv preprint arXiv:2202.13529 (2022)."},{"key":"e_1_3_2_1_7_1","unstructured":"Yoav Goldberg. 2019. Assessing BERT\u2019s Syntactic Abilities. arxiv:1901.05287\u00a0[cs.CL]"},{"key":"e_1_3_2_1_8_1","volume-title":"Language models as knowledge bases: On entity representations, storage capacity, and paraphrased queries. arXiv preprint arXiv:2008.09036","author":"Heinzerling Benjamin","year":"2020","unstructured":"Benjamin Heinzerling and Kentaro Inui. 2020. Language models as knowledge bases: On entity representations, storage capacity, and paraphrased queries. arXiv preprint arXiv:2008.09036 (2020)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.158"},{"key":"e_1_3_2_1_10_1","volume-title":"Introduction to the Grammar of English","author":"Huddleston Rodney","unstructured":"Rodney Huddleston. 1984. Introduction to the Grammar of English. Cambridge University Press."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1356"},{"key":"e_1_3_2_1_12_1","volume-title":"How can we know what language models know?Transactions of the Association for Computational Linguistics 8","author":"Jiang Zhengbao","year":"2020","unstructured":"Zhengbao Jiang, Frank\u00a0F Xu, Jun Araki, and Graham Neubig. 2020. How can we know what language models know?Transactions of the Association for Computational Linguistics 8 (2020), 423\u2013438."},{"key":"e_1_3_2_1_13_1","volume-title":"Proceedings of the Conference on Automated Knowledge Base Construction.","author":"Kalo Jan-Christoph","year":"2022","unstructured":"Jan-Christoph Kalo and Leandra Fichtel. 2022. KAMEL: Knowledge Analysis with Multitoken Entities in Language Models. In Proceedings of the Conference on Automated Knowledge Base Construction."},{"key":"e_1_3_2_1_14_1","volume-title":"Are Pretrained Language Models Symbolic Reasoners Over Knowledge?arXiv preprint arXiv:2006.10413","author":"Kassner Nora","year":"2020","unstructured":"Nora Kassner, Benno Krojer, and Hinrich Sch\u00fctze. 2020. Are Pretrained Language Models Symbolic Reasoners Over Knowledge?arXiv preprint arXiv:2006.10413 (2020)."},{"key":"e_1_3_2_1_15_1","volume-title":"Proceedings of the 24th Conference on Computational Natural Language Learning. CoRR.","author":"Kassner Nora","year":"2020","unstructured":"Nora Kassner, Benno Krojer, and Hinrich Sch\u00fctze. 2020. Pre-trained Language Models as Symbolic Reasoners over Knowledge?, In Proceedings of the 24th Conference on Computational Natural Language Learning. CoRR."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.376"},{"key":"e_1_3_2_1_17_1","volume-title":"SentiLARE: Sentiment-aware language representation learning with linguistic knowledge. arXiv preprint arXiv:1911.02493","author":"Ke Pei","year":"2019","unstructured":"Pei Ke, Haozhe Ji, Siyang Liu, Xiaoyan Zhu, and Minlie Huang. 2019. SentiLARE: Sentiment-aware language representation learning with linguistic knowledge. arXiv preprint arXiv:1911.02493 (2019)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"Anne Lauscher Ivan Vuli\u0107 Edoardo\u00a0Maria Ponti Anna Korhonen and Goran Glava\u0161. 2019. Specializing Unsupervised Pretraining Models for Word-Level Semantic Similarity. arxiv:1909.02339\u00a0[cs.CL]","DOI":"10.18653\/v1\/2020.coling-main.118"},{"key":"e_1_3_2_1_19_1","volume-title":"Sensebert: Driving some sense into bert. arXiv preprint arXiv:1908.05646","author":"Levine Yoav","year":"2019","unstructured":"Yoav Levine, Barak Lenz, Or Dagan, Ori Ram, Dan Padnos, Or Sharir, Shai Shalev-Shwartz, Amnon Shashua, and Yoav Shoham. 2019. Sensebert: Driving some sense into bert. arXiv preprint arXiv:1908.05646 (2019)."},{"key":"e_1_3_2_1_20_1","volume-title":"Multi-task deep neural networks for natural language understanding. arXiv preprint arXiv:1901.11504","author":"Liu Xiaodong","year":"2019","unstructured":"Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jianfeng Gao. 2019. Multi-task deep neural networks for natural language understanding. arXiv preprint arXiv:1901.11504 (2019)."},{"key":"e_1_3_2_1_21_1","volume-title":"Knowledge enhanced contextual word representations. arXiv preprint arXiv:1909.04164","author":"Peters E","year":"2019","unstructured":"Matthew\u00a0E Peters, Mark Neumann, Robert\u00a0L Logan\u00a0IV, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah\u00a0A Smith. 2019. Knowledge enhanced contextual word representations. arXiv preprint arXiv:1909.04164 (2019)."},{"key":"e_1_3_2_1_22_1","unstructured":"Fabio Petroni Patrick Lewis Aleksandra Piktus Tim Rockt\u00e4schel Yuxiang Wu Alexander\u00a0H. Miller and Sebastian Riedel. 2020. How Context Affects Language Models\u2019 Factual Predictions. arxiv:2005.04611\u00a0[cs.CL]"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"e_1_3_2_1_24_1","volume-title":"How Much Knowledge Can You Pack Into the Parameters of a Language Model?arXiv preprint arXiv:2002.08910","author":"Roberts Adam","year":"2020","unstructured":"Adam Roberts, Colin Raffel, and Noam Shazeer. 2020. How Much Knowledge Can You Pack Into the Parameters of a Language Model?arXiv preprint arXiv:2002.08910 (2020)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00349"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1548"},{"key":"e_1_3_2_1_27_1","volume-title":"Ernie: Enhanced representation through knowledge integration. arXiv preprint arXiv:1904.09223","author":"Sun Yu","year":"2019","unstructured":"Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, and Hua Wu. 2019. Ernie: Enhanced representation through knowledge integration. arXiv preprint arXiv:1904.09223 (2019)."},{"key":"e_1_3_2_1_28_1","volume-title":"CEUR Workshop Proceedings, Vol.\u00a03052","author":"Sundararaman Dhanasekar","year":"2021","unstructured":"Dhanasekar Sundararaman, Vivek Subramanian, Guoyin Wang, Shijing Si, Dinghan Shen, Dong Wang, and Lawrence Carin. 2021. Syntactic Knowledge-Infused Transformer and BERT models. In CEUR Workshop Proceedings, Vol.\u00a03052. CEUR Workshop Proceedings."},{"key":"e_1_3_2_1_29_1","volume-title":"What do you learn from context? probing for sentence structure in contextualized word representations. arXiv preprint arXiv:1905.06316","author":"Tenney Ian","year":"2019","unstructured":"Ian Tenney, Patrick Xia, Berlin Chen, Alex Wang, Adam Poliak, R\u00a0Thomas McCoy, Najoung Kim, Benjamin Van\u00a0Durme, Samuel\u00a0R Bowman, Dipanjan Das, 2019. What do you learn from context? probing for sentence structure in contextualized word representations. arXiv preprint arXiv:1905.06316 (2019)."},{"key":"e_1_3_2_1_30_1","volume-title":"SKEP: Sentiment knowledge enhanced pre-training for sentiment analysis. arXiv preprint arXiv:2005.05635","author":"Tian Hao","year":"2020","unstructured":"Hao Tian, Can Gao, Xinyan Xiao, Hao Liu, Bolei He, Hua Wu, Haifeng Wang, and Feng Wu. 2020. SKEP: Sentiment knowledge enhanced pre-training for sentiment analysis. arXiv preprint arXiv:2005.05635 (2020)."},{"key":"e_1_3_2_1_31_1","volume-title":"K-adapter: Infusing knowledge into pre-trained models with adapters. arXiv preprint arXiv:2002.01808","author":"Wang Ruize","year":"2020","unstructured":"Ruize Wang, Duyu Tang, Nan Duan, Zhongyu Wei, Xuanjing Huang, Guihong Cao, Daxin Jiang, Ming Zhou, 2020. K-adapter: Infusing knowledge into pre-trained models with adapters. arXiv preprint arXiv:2002.01808 (2020)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00360"},{"key":"e_1_3_2_1_33_1","volume-title":"A Survey on Knowledge-Enhanced Pre-trained Language Models. arXiv preprint arXiv:2212.13428","author":"Zhen Chaoqi","year":"2022","unstructured":"Chaoqi Zhen, Yanlei Shang, Xiangyu Liu, Yifei Li, Yong Chen, and Dell Zhang. 2022. A Survey on Knowledge-Enhanced Pre-trained Language Models. arXiv preprint arXiv:2212.13428 (2022)."},{"key":"e_1_3_2_1_34_1","volume-title":"Factual Probing Is [MASK]: Learning vs. Learning to Recall","author":"Zhong Zexuan","unstructured":"Zexuan Zhong, Dan Friedman, and Danqi Chen. 2021. Factual Probing Is [MASK]: Learning vs. Learning to Recall. In North American Association for Computational Linguistics (NAACL)."}],"event":{"name":"WWW '23: The ACM Web Conference 2023","location":"Austin TX USA","acronym":"WWW '23","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM Web Conference 2023"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3543873.3587655","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3543873.3587655","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T23:47:59Z","timestamp":1755820079000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3543873.3587655"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,4,30]]},"references-count":34,"alternative-id":["10.1145\/3543873.3587655","10.1145\/3543873"],"URL":"https:\/\/doi.org\/10.1145\/3543873.3587655","relation":{},"subject":[],"published":{"date-parts":[[2023,4,30]]},"assertion":[{"value":"2023-04-30","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}