{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,16]],"date-time":"2026-01-16T02:27:07Z","timestamp":1768530427646,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":23,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,12,5]],"date-time":"2023-12-05T00:00:00Z","timestamp":1701734400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-sa\/4.0\/"}],"funder":[{"name":"German BMBF project SCINEXT (ID 01lS22070)","award":["ID 01lS22070"],"award-info":[{"award-number":["ID 01lS22070"]}]},{"name":"MICS (Made in Italy ? Circular and Sustainable) Extended Partnership and received funding from Next-GenerationEU (Italian PNRR ? M4 C2, Invest 1.3 ? D.D. 1551.11-10-2022)","award":["PE00000004"],"award-info":[{"award-number":["PE00000004"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,12,5]]},"DOI":"10.1145\/3587259.3627572","type":"proceedings-article","created":{"date-parts":[[2023,11,28]],"date-time":"2023-11-28T19:30:28Z","timestamp":1701199828000},"page":"9-16","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":13,"title":["Procedural Text Mining with Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8046-7502","authenticated-orcid":false,"given":"Anisa","family":"Rula","sequence":"first","affiliation":[{"name":"Information Engineering, University of Brescia, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6616-9509","authenticated-orcid":false,"given":"Jennifer","family":"D'Souza","sequence":"additional","affiliation":[{"name":"TIB Leibniz Information Centre for Science and Technology, Germany"}]}],"member":"320","published-online":{"date-parts":[[2023,12,5]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Process Extraction from Text: state of the art and challenges for the future. CoRR abs\/2110.03754","author":"Bellan Patrizio","year":"2021","unstructured":"Patrizio Bellan, Mauro Dragoni, and Chiara Ghidini. 2021. Process Extraction from Text: state of the art and challenges for the future. CoRR abs\/2110.03754 (2021)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-17604-3_11"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.116702"},{"key":"e_1_3_2_1_4_1","volume-title":"Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416","author":"Chung Hyung\u00a0Won","year":"2022","unstructured":"Hyung\u00a0Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, 2022. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2021.103439"},{"key":"e_1_3_2_1_6_1","volume-title":"2nd International Workshop on Linked Science (ISWC2012)","author":"Garijo Daniel","year":"2012","unstructured":"Daniel Garijo and Yolanda Gil. 2012. Augmenting prov with plans in p-plan: scientific processes as linked data. In 2nd International Workshop on Linked Science (ISWC2012). CEUR Workshop Proceedings."},{"key":"e_1_3_2_1_7_1","unstructured":"Xinyang Geng and Hao Liu. 2023. OpenLLaMA: An Open Reproduction of LLaMA. 
https:\/\/github.com\/openlm-research\/open_llama"},{"key":"e_1_3_2_1_8_1","volume-title":"ICWE(Lecture Notes in Computer Science, Vol.\u00a012706)","author":"Jaradeh Mohamad\u00a0Yaser","unstructured":"Mohamad\u00a0Yaser Jaradeh, Kuldeep Singh, Markus Stocker, Andreas Both, and S\u00f6ren Auer. 2021. Better Call the Plumber: Orchestrating Dynamic Information Extraction Pipelines. In ICWE(Lecture Notes in Computer Science, Vol.\u00a012706). Springer, 240\u2013254."},{"key":"e_1_3_2_1_9_1","volume-title":"ROUGE: A Package for Automatic Evaluation of Summaries. In Text Summarization Branches Out","author":"Lin Chin-Yew","year":"2004","unstructured":"Chin-Yew Lin. 2004. ROUGE: A Package for Automatic Evaluation of Summaries. In Text Summarization Branches Out. Association for Computational Linguistics, 74\u201381. https:\/\/aclanthology.org\/W04-1013"},{"key":"e_1_3_2_1_10_1","volume-title":"The flan collection: Designing data and methods for effective instruction tuning. arXiv preprint arXiv:2301.13688","author":"Longpre Shayne","year":"2023","unstructured":"Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung\u00a0Won Chung, Yi Tay, Denny Zhou, Quoc\u00a0V Le, Barret Zoph, Jason Wei, 2023. The flan collection: Designing data and methods for effective instruction tuning. arXiv preprint arXiv:2301.13688 (2023)."},{"key":"e_1_3_2_1_11_1","unstructured":"Kyle Mahowald Anna\u00a0A. Ivanova Idan\u00a0A. Blank Nancy Kanwisher Joshua\u00a0B. Tenenbaum and Evelina Fedorenko. 2023. Dissociating language and thought in large language models: a cognitive perspective. arxiv:2301.06627\u00a0[cs.CL]"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3605943"},{"key":"e_1_3_2_1_13_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. arxiv:2303.08774\u00a0[cs.CL]"},{"key":"e_1_3_2_1_14_1","unstructured":"Shirui Pan Linhao Luo Yufei Wang Chen Chen Jiapu Wang and Xindong Wu. 2023. Unifying Large Language Models and Knowledge Graphs: A Roadmap. arxiv:2306.08302\u00a0[cs.CL]"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.5555\/3455716.3455856"},{"key":"e_1_3_2_1_16_1","volume-title":"CAiSE, Vol.\u00a012751","author":"Rebmann Adrian","unstructured":"Adrian Rebmann and Han van\u00a0der Aa. 2021. Extracting Semantic Process Information from the Natural Language in Event Logs. In CAiSE, Vol.\u00a012751. Springer, 57\u201374."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-33455-9_27"},{"key":"e_1_3_2_1_18_1","volume-title":"Abubakar Abid, Adam Fisch, Adam\u00a0R Brown, Adam Santoro, Aditya Gupta","author":"Srivastava Aarohi","year":"2022","unstructured":"Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal\u00a0Md Shoeb, Abubakar Abid, Adam Fisch, Adam\u00a0R Brown, Adam Santoro, Aditya Gupta, Adri\u00e0 Garriga-Alonso, 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615 (2022)."},{"key":"e_1_3_2_1_19_1","unstructured":"Kai Sun Yifan\u00a0Ethan Xu Hanwen Zha Yue Liu and Xin\u00a0Luna Dong. 2023. Head-to-Tail: How Knowledgeable are Large Language Models (LLM)? A.K.A. Will LLMs Replace Knowledge Graphs?arxiv:2308.10168\u00a0[cs.CL]"},{"key":"e_1_3_2_1_20_1","volume-title":"Llama: Open and efficient foundation language models. 
arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_22_1","volume-title":"Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652","author":"Wei Jason","year":"2021","unstructured":"Jason Wei, Maarten Bosma, Vincent\u00a0Y Zhao, Kelvin Guu, Adams\u00a0Wei Yu, Brian Lester, Nan Du, Andrew\u00a0M Dai, and Quoc\u00a0V Le. 2021. Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652 (2021)."},{"key":"e_1_3_2_1_23_1","volume-title":"Emergent abilities of large language models. arXiv preprint arXiv:2206.07682","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, 2022. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682 (2022)."}],"event":{"name":"K-CAP '23: Knowledge Capture Conference 2023","location":"Pensacola FL USA","acronym":"K-CAP '23","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence"]},"container-title":["Proceedings of the 12th Knowledge Capture Conference 2023"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3587259.3627572","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3587259.3627572","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T23:44:05Z","timestamp":1755906245000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3587259.3627572"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,5]]},"references-count":23,"alternative-id":["10.1145\/3587259.3627572","10.1145\/3587259"],"URL":"https:\/\/doi.org\/10.1145\/3587259.3627572","relation":{},"subject":[],"published":{"date-parts":[[2023,12,5]]},"assertion":[{"value":"2023-12-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
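
The record above is the Crossref "work" metadata for DOI 10.1145/3587259.3627572 (Rula and D'Souza, "Procedural Text Mining with Large Language Models", K-CAP '23). The sketch below shows how such a record could be retrieved and summarised; it is illustrative only and assumes Python 3, the third-party requests package, and the public Crossref REST API endpoint https://api.crossref.org/works/{doi}, none of which are part of the record itself.

# Illustrative sketch: fetch the same Crossref "work" record and print a one-line citation.
# Assumptions (not taken from the record): Python 3, the `requests` package, network access.
import requests

DOI = "10.1145/3587259.3627572"  # "DOI" field of the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # corresponds to the "message" object above

# Join author names from the "author" array (given + family, as in the record).
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)
print(f'{authors}. "{work["title"][0]}". {work["container-title"][0]}, '
      f'pp. {work.get("page", "n/a")}. https://doi.org/{work["DOI"]}')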