{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:08:37Z","timestamp":1750219717793,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":51,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,4,8]],"date-time":"2024-04-08T00:00:00Z","timestamp":1712534400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"DataCloud","award":["H2020 101016835"],"award-info":[{"award-number":["H2020 101016835"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,4,8]]},"DOI":"10.1145\/3605098.3635949","type":"proceedings-article","created":{"date-parts":[[2024,5,21]],"date-time":"2024-05-21T17:59:16Z","timestamp":1716314356000},"page":"731-740","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Wiki-based Prompts for Enhancing Relation Extraction using Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3264-974X","authenticated-orcid":false,"given":"Amirhossein","family":"Layegh","sequence":"first","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2748-8929","authenticated-orcid":false,"given":"Amir H.","family":"Payberah","sequence":"additional","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6034-4137","authenticated-orcid":false,"given":"Ahmet","family":"Soylu","sequence":"additional","affiliation":[{"name":"Oslo Metropolitan University, Oslo, Norway"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6397-3705","authenticated-orcid":false,"given":"Dumitru","family":"Roman","sequence":"additional","affiliation":[{"name":"SINTEF AS, Oslo, Norway"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4722-0823","authenticated-orcid":false,"given":"Mihhail","family":"Matskin","sequence":"additional","affiliation":[{"name":"KTH Royal Institute of Technology, Stockholm, Sweden"}]}],"member":"320","published-online":{"date-parts":[[2024,5,21]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.142"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449917"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00671-6_2"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445922"},{"key":"e_1_3_2_1_5_1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, et al. 2020. Language models are few-shot learners. NeurIPS 33 (2020), 1877--1901.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3511998"},{"key":"e_1_3_2_1_7_1","volume-title":"Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555","author":"Chung Junyoung","year":"2014","unstructured":"Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)."},{"key":"e_1_3_2_1_8_1","unstructured":"Hyung Won Chung et al. 2022. 
Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)."},{"key":"e_1_3_2_1_9_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, et al. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","unstructured":"Yaojie Lu et al. 2022. Unified structure generation for universal information extraction. (2022). 10.18653\/v1\/2022.acl-long.395","DOI":"10.18653\/v1\/2022.acl-long.395"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.295"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.381"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.11.003"},{"volume-title":"Proceedings of the 5th International Workshop on Semantic Evaluation. 33--38","author":"Hendrickx Iris","key":"e_1_3_2_1_14_1","unstructured":"Iris Hendrickx et al. 2010. SemEval-2010 Task 8: Multi-Way Classification of Semantic Relations between Pairs of Nominals. In Proceedings of the 5th International Workshop on Semantic Evaluation. 33--38."},{"key":"e_1_3_2_1_15_1","unstructured":"Shirui Pan et al. 2023. Unifying Large Language Models and Knowledge Graphs: A Roadmap. arXiv preprint arXiv:2306.08302 (2023)."},{"key":"e_1_3_2_1_16_1","volume-title":"ICLR 2022","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, Yelong Shen, et al. 2022. LoRA: Low-Rank Adaptation of Large Language Models. In ICLR 2022. https:\/\/openreview.net\/forum?id=nZeVKeeFYf9"},{"volume-title":"Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. 2225--2240","author":"Hu Shengding","key":"e_1_3_2_1_17_1","unstructured":"Shengding Hu et al. 2022. Knowledgeable Prompt-tuning: Incorporating Knowledge into Prompt Verbalizer for Text Classification. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. 2225--2240."},{"key":"e_1_3_2_1_18_1","volume-title":"Spanbert: Improving pre-training by representing and predicting spans. Transactions of the association for computational linguistics","author":"Joshi Mandar","year":"2020","unstructured":"Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S Weld, Luke Zettlemoyer, and Omer Levy. 2020. Spanbert: Improving pre-training by representing and predicting spans. Transactions of the association for computational linguistics (2020)."},{"key":"e_1_3_2_1_19_1","volume-title":"Proceedings of the 2021 Conference of NAACL. 2627--2636","author":"Scao Teven Le","year":"2021","unstructured":"Teven Le Scao and Alexander M Rush. 2021. How many data points is a prompt worth? In Proceedings of the 2021 Conference of NAACL. 2627--2636."},{"key":"e_1_3_2_1_20_1","volume-title":"Improving relation extraction with knowledge-attention. arXiv preprint arXiv:1910.02724","author":"Li Pengfei","year":"2019","unstructured":"Pengfei Li, Kezhi Mao, Xuefeng Yang, and Qi Li. 2019. Improving relation extraction with knowledge-attention. arXiv preprint arXiv:1910.02724 (2019)."},{"key":"e_1_3_2_1_21_1","volume-title":"Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190","author":"Li Xiang Lisa","year":"2021","unstructured":"Xiang Lisa Li and Percy Liang. 2021. 
Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","unstructured":"Belinda Z Li et al. 2020. Efficient one-pass end-to-end entity linking for questions. arXiv preprint arXiv:2010.02413 (2020).","DOI":"10.18653\/v1\/2020.emnlp-main.522"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.225"},{"key":"e_1_3_2_1_24_1","unstructured":"Pengfei Liu et al. 2023. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. Comput. Surveys (2023)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","unstructured":"Xiao Liu, Kaixuan Ji, Yicheng Fu, et al. 2022. P-Tuning: Prompt Tuning Can Be Comparable to Fine-tuning Across Scales and Tasks. Association for Computational Linguistics. 10.18653\/v1\/2022.acl-short.8","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"e_1_3_2_1_26_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, et al. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)."},{"volume-title":"The Semantic Web-ISWC 2019: 18th International Semantic Web Conference","author":"Lukovnikov Denis","key":"e_1_3_2_1_27_1","unstructured":"Denis Lukovnikov, Asja Fischer, and Jens Lehmann. 2019. Pretrained transformers for simple question answering over knowledge graphs. In The Semantic Web-ISWC 2019: 18th International Semantic Web Conference. Springer."},{"volume-title":"A BERT-based approach with relation-aware attention for knowledge base question answering. In 2020 IJCNN","author":"Luo Da","key":"e_1_3_2_1_28_1","unstructured":"Da Luo, Jindian Su, and Shanshan Yu. 2020. A BERT-based approach with relation-aware attention for knowledge base question answering. In 2020 IJCNN. IEEE."},{"key":"e_1_3_2_1_29_1","volume-title":"Cross-task generalization via natural language crowdsourcing instructions. arXiv preprint arXiv:2104.08773","author":"Mishra Swaroop","year":"2021","unstructured":"Swaroop Mishra, Daniel Khashabi, Chitta Baral, and Hannaneh Hajishirzi. 2021. Cross-task generalization via natural language crowdsourcing instructions. arXiv preprint arXiv:2104.08773 (2021)."},{"key":"e_1_3_2_1_30_1","volume-title":"Proceedings of the 16th national conference on artificial intelligence.","author":"Mooney R","year":"1999","unstructured":"R Mooney. 1999. Relational learning of pattern-match rules for information extraction. In Proceedings of the 16th national conference on artificial intelligence."},{"key":"e_1_3_2_1_31_1","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang Long","year":"2022","unstructured":"Long Ouyang, Jeffrey Wu, Xu Jiang, et al. 2022. Training language models to follow instructions with human feedback. NeurIPS 35 (2022), 27730--27744.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_32_1","unstructured":"Rafael Rafailov, Archit Sharma, Eric Mitchell, et al. 2023. Direct preference optimization: Your language model is secretly a reward model. arXiv preprint arXiv:2305.18290 (2023)."},{"key":"e_1_3_2_1_33_1","unstructured":"Ryan Brate et al. 2022. Improving Language Model Predictions via Prompts Enriched with Knowledge Graphs. 
In DL4KG ISWC2022."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"crossref","unstructured":"Timo Schick, Helmut Schmid, and Hinrich Sch\u00fctze. 2020. Automatically Identifying Words That Can Serve as Labels for Few-Shot Text Classification. International Committee on Computational Linguistics. https:\/\/aclanthology.org\/2020.coling-main.488","DOI":"10.18653\/v1\/2020.coling-main.488"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"e_1_3_2_1_36_1","volume-title":"Matching the blanks: Distributional similarity for relation learning. arXiv preprint arXiv:1906.03158","author":"Soares Livio Baldini","year":"2019","unstructured":"Livio Baldini Soares, Nicholas FitzGerald, Jeffrey Ling, and Tom Kwiatkowski. 2019. Matching the blanks: Distributional similarity for relation learning. arXiv preprint arXiv:1906.03158 (2019)."},{"key":"e_1_3_2_1_37_1","volume-title":"Proceedings of the AAAI Conference on Artificial Intelligence","volume":"35","author":"Stoica George","year":"2021","unstructured":"George Stoica, Emmanouil Antonios Platanios, and Barnab\u00e1s P\u00f3czos. 2021. Retacred: Addressing shortcomings of the tacred dataset. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 35. 13843--13850."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-2035"},{"key":"e_1_3_2_1_39_1","unstructured":"Hugo Touvron et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.214"},{"key":"e_1_3_2_1_41_1","unstructured":"Xiao Wang, Weikang Zhou, et al. 2023. InstructUIE: Multi-task Instruction Tuning for Unified Information Extraction. arXiv preprint arXiv:2304.08085 (2023)."},{"key":"e_1_3_2_1_42_1","volume-title":"Aligning large language models with human: A survey. arXiv preprint arXiv:2307.12966","author":"Wang Yufei","year":"2023","unstructured":"Yufei Wang, Wanjun Zhong, Liangyou Li, Fei Mi, Xingshan Zeng, Wenyong Huang, Lifeng Shang, Xin Jiang, and Qun Liu. 2023. Aligning large language models with human: A survey. arXiv preprint arXiv:2307.12966 (2023)."},{"key":"e_1_3_2_1_43_1","unstructured":"Zeqiu Wu, Yushi Hu, et al. 2023. Fine-Grained Human Feedback Gives Better Rewards for Language Model Training. arXiv preprint arXiv:2306.01693 (2023)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","unstructured":"Xin Xu, Yuqi Zhu, Xiaohan Wang, and Ningyu Zhang. 2023. How to Unleash the Power of Large Language Models for Few-shot Relation Extraction? Association for Computational Linguistics. 10.18653\/v1\/2023.sustainlp-1.13","DOI":"10.18653\/v1\/2023.sustainlp-1.13"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.523"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3511921"},{"key":"e_1_3_2_1_47_1","volume-title":"Document-level relation extraction as semantic segmentation. arXiv preprint arXiv:2106.03618","author":"Zhang Ningyu","year":"2021","unstructured":"Ningyu Zhang, Xiang Chen, Xin Xie, Shumin Deng, Chuanqi Tan, Mosha Chen, Fei Huang, Luo Si, and Huajun Chen. 2021. Document-level relation extraction as semantic segmentation. arXiv preprint arXiv:2106.03618 (2021)."},{"key":"e_1_3_2_1_48_1","volume-title":"Llama-adapter: Efficient fine-tuning of language models with zero-init attention. 
arXiv preprint arXiv:2303.16199","author":"Zhang Renrui","year":"2023","unstructured":"Renrui Zhang, Jiaming Han, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, Peng Gao, and Yu Qiao. 2023. Llama-adapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199 (2023)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1004"},{"key":"e_1_3_2_1_50_1","unstructured":"Shengyu Zhang et al. 2023. Instruction Tuning for Large Language Models: A Survey. arXiv preprint arXiv:2308.10792 (2023)."},{"key":"e_1_3_2_1_51_1","volume-title":"Proceedings of the ACL. 161--168","author":"Zhou Wenxuan","year":"2022","unstructured":"Wenxuan Zhou and Muhao Chen. 2022. An Improved Baseline for Sentence-level Relation Extraction. In Proceedings of the ACL. 161--168."}],"event":{"name":"SAC '24: 39th ACM\/SIGAPP Symposium on Applied Computing","sponsor":["SIGAPP ACM Special Interest Group on Applied Computing"],"location":"Avila, Spain","acronym":"SAC '24"},"container-title":["Proceedings of the 39th ACM\/SIGAPP Symposium on Applied Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3605098.3635949","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3605098.3635949","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:36:14Z","timestamp":1750178174000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3605098.3635949"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,8]]},"references-count":51,"alternative-id":["10.1145\/3605098.3635949","10.1145\/3605098"],"URL":"https:\/\/doi.org\/10.1145\/3605098.3635949","relation":{},"subject":[],"published":{"date-parts":[[2024,4,8]]},"assertion":[{"value":"2024-05-21","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}