{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T05:01:33Z","timestamp":1750309293536,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":34,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,6,9]],"date-time":"2024-06-09T00:00:00Z","timestamp":1717891200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,6,9]]},"DOI":"10.1145\/3650203.3663328","type":"proceedings-article","created":{"date-parts":[[2024,5,29]],"date-time":"2024-05-29T20:13:23Z","timestamp":1717013603000},"page":"12-22","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["tailwiz: Empowering Domain Experts with Easy-to-Use, Task-Specific Natural Language Processing Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-9074-2037","authenticated-orcid":false,"given":"Timothy","family":"Dai","sequence":"first","affiliation":[{"name":"Stanford University"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-8901-6352","authenticated-orcid":false,"given":"Austin","family":"Peters","sequence":"additional","affiliation":[{"name":"Stanford University"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3978-5250","authenticated-orcid":false,"given":"Jonah B.","family":"Gelbach","sequence":"additional","affiliation":[{"name":"UC Berkeley, School of Law"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-8049-9474","authenticated-orcid":false,"given":"David Freeman","family":"Engstrom","sequence":"additional","affiliation":[{"name":"Stanford Law 
School"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9860-9938","authenticated-orcid":false,"given":"Daniel","family":"Kang","sequence":"additional","affiliation":[{"name":"UIUC"}]}],"member":"320","published-online":{"date-parts":[[2024,6,9]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"[n. d.]. Complaint. https:\/\/www.law.cornell.edu\/wex\/complaint"},{"key":"e_1_3_2_1_2_1","unstructured":"[n. d.]. Intro.8.2 Textualism and Constitutional Interpretation. ([n. d.]). https:\/\/constitution.congress.gov\/browse\/essay\/intro.8-2\/ALDE_00001303\/"},{"key":"e_1_3_2_1_3_1","unstructured":"[n. d.]. Motion. https:\/\/www.law.cornell.edu\/wex\/motion"},{"key":"e_1_3_2_1_4_1","unstructured":"[n. d.]. Opinion. https:\/\/www.law.cornell.edu\/wex\/opinion"},{"key":"e_1_3_2_1_5_1","unstructured":"[n. d.]. Proceeding. https:\/\/www.law.cornell.edu\/wex\/proceeding"},{"key":"e_1_3_2_1_6_1","unstructured":"[n. d.]. Sealing of Records. https:\/\/www.law.cornell.edu\/wex\/sealing_of_records"},{"key":"e_1_3_2_1_7_1","unstructured":"[n. d.]. Statutory Interpretation. https:\/\/www.law.cornell.edu\/wex\/statutory_interpretation"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_9_1","unstructured":"Greg Brockman Atty Eleti Elie Georges Joanne Jang Logan Kilpatrick Rachel Lim Luke Miller and Michelle Pokrass. [n. d.]. Introducing ChatGPT and Whisper APIs. https:\/\/openai.com\/blog\/introducing-chatgpt-and-whisper-apis"},{"key":"e_1_3_2_1_10_1","unstructured":"Tom B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel M. Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. 
Language Models are Few-Shot Learners. http:\/\/arxiv.org\/abs\/2005.14165 arXiv:2005.14165 [cs]."},{"key":"e_1_3_2_1_11_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https:\/\/arxiv.org\/abs\/1810.04805v2","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https:\/\/arxiv.org\/abs\/1810.04805v2"},{"key":"e_1_3_2_1_12_1","first-page":"1244","article-title":"Harnessing the private attorney general: Evidence from qui tam litigation","volume":"112","author":"Engstrom David Freeman","year":"2012","unstructured":"David Freeman Engstrom. 2012. Harnessing the private attorney general: Evidence from qui tam litigation. Colum. L. Rev. 112 (2012), 1244. Publisher: HeinOnline.","journal-title":"Colum. L. Rev."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1017\/lsi.2020.25"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1257\/000282802760015748"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","unstructured":"Jonah B. Gelbach. 2021. Testing Economic Models of Discrimination in Criminal Justice. https:\/\/doi.org\/10.2139\/ssrn.3784953","DOI":"10.2139\/ssrn.3784953"},{"key":"e_1_3_2_1_16_1","unstructured":"Google. [n. d.]. Getting started: training and prediction with Keras | AI Platform. https:\/\/cloud.google.com\/ai-platform\/docs\/getting-started-keras"},{"key":"e_1_3_2_1_17_1","volume-title":"Stewart","author":"Grimmer Justin","year":"2022","unstructured":"Justin Grimmer, Margaret E. Roberts, and Brandon M. Stewart. 2022. Text as Data: A New Framework for Machine Learning and the Social Sciences. 
Princeton University Press, Princeton."},{"key":"e_1_3_2_1_18_1","volume-title":"Sofie Van Landeghem, and Adriane Boyd","author":"Honnibal Matthew","year":"2020","unstructured":"Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python. https:\/\/spacy.io\/"},{"key":"e_1_3_2_1_19_1","unstructured":"Armand Joulin Edouard Grave Piotr Bojanowski and Tomas Mikolov. 2016. Bag of Tricks for Efficient Text Classification. http:\/\/arxiv.org\/abs\/1607.01759arXiv:1607.01759 [cs]."},{"key":"e_1_3_2_1_20_1","volume-title":"Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda.","author":"Liang Percy","year":"2022","unstructured":"Percy Liang, Rishi Bommasani, Tony Lee, Dimitris Tsipras, Dilara Soylu, Michihiro Yasunaga, Yian Zhang, Deepak Narayanan, Yuhuai Wu, Ananya Kumar, Benjamin Newman, Binhang Yuan, Bobby Yan, Ce Zhang, Christian Cosgrove, Christopher D. Manning, Christopher R\u00e9, Diana Acosta-Navas, Drew A. Hudson, Eric Zelikman, Esin Durmus, Faisal Ladhak, Frieda Rong, Hongyu Ren, Huaxiu Yao, Jue Wang, Keshav Santhanam, Laurel Orr, Lucia Zheng, Mert Yuksekgonul, Mirac Suzgun, Nathan Kim, Neel Guha, Niladri Chatterji, Omar Khattab, Peter Henderson, Qian Huang, Ryan Chi, Sang Michael Xie, Shibani Santurkar, Surya Ganguli, Tatsunori Hashimoto, Thomas Icard, Tianyi Zhang, Vishrav Chaudhary, William Wang, Xuechen Li, Yifan Mai, Yuhui Zhang, and Yuta Koreeda. 2022. Holistic Evaluation of Language Models. http:\/\/arxiv.org\/abs\/2211.09110arXiv:2211.09110 [cs]."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.3115\/1118108.1118117"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/P14-5010"},{"key":"e_1_3_2_1_23_1","volume-title":"Digital Humanities and Natural Language Processing: \"Je t'aime... Moi non plus\". 
DHQ: Digital Humanities Quarterly 14, 2","author":"McGillivray Barbara","year":"2020","unstructured":"Barbara McGillivray and Thierry Poibeau. 2020. Digital Humanities and Natural Language Processing: \"Je t'aime... Moi non plus\". DHQ: Digital Humanities Quarterly 14, 2 (2020)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3397481.3450637"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_26_1","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans and Ilya Sutskever. 2018. Improving Language Understanding by Generative Pre-Training. (2018)."},{"key":"e_1_3_2_1_27_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei and Ilya Sutskever. 2019. Language Models are Unsupervised Multitask Learners. (2019)."},{"key":"e_1_3_2_1_28_1","volume-title":"Liu","author":"Raffel Colin","year":"2019","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. https:\/\/arxiv.org\/abs\/1910.10683v3"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Laria Reynolds and Kyle McDonell. 2021. Prompt Programming for Large Language Models: Beyond the Few-Shot Paradigm. http:\/\/arxiv.org\/abs\/2102.07350 arXiv:2102.07350 [cs].","DOI":"10.1145\/3411763.3451760"},{"key":"e_1_3_2_1_30_1","unstructured":"Victor Sanh Albert Webson Colin Raffel Stephen H. Bach Lintang Sutawika Zaid Alyafeai Antoine Chaffin Arnaud Stiegler Teven Le Scao Arun Raja Manan Dey M. 
Saiful Bari Canwen Xu Urmish Thakker Shanya Sharma Sharma Eliza Szczechla Taewoon Kim Gunjan Chhablani Nihal Nayak Debajyoti Datta Jonathan Chang Mike Tian-Jian Jiang Han Wang Matteo Manica Sheng Shen Zheng Xin Yong Harshit Pandey Rachel Bawden Thomas Wang Trishala Neeraj Jos Rozen Abheesht Sharma Andrea Santilli Thibault Fevry Jason Alan Fries Ryan Teehan Tali Bers Stella Biderman Leo Gao Thomas Wolf and Alexander M. Rush. 2022. Multitask Prompted Training Enables Zero-Shot Task Generalization. http:\/\/arxiv.org\/abs\/2110.08207 arXiv:2110.08207 [cs]."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1002\/asi.24544"},{"key":"e_1_3_2_1_32_1","volume-title":"Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Maarten Bosma, Vincent Y. Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. 2022. Finetuned Language Models Are Zero-Shot Learners. arXiv:2109.01652 [cs]."},{"key":"e_1_3_2_1_33_1","unstructured":"Benfeng Xu Quan Wang Zhendong Mao Yajuan Lyu Qiaoqiao She and Yongdong Zhang. 2023. kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference. (2023)."},{"key":"e_1_3_2_1_34_1","volume-title":"Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba.","author":"Zhou Yongchao","year":"2023","unstructured":"Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2023. Large Language Models Are Human-Level Prompt Engineers. http:\/\/arxiv.org\/abs\/2211.01910 arXiv:2211.01910 [cs]."}],"event":{"name":"SIGMOD\/PODS '24: International Conference on Management of Data","sponsor":["SIGMOD ACM Special Interest Group on Management of Data"],"location":"Santiago AA Chile","acronym":"SIGMOD\/PODS '24"},"container-title":["Proceedings of the Eighth Workshop on Data Management for End-to-End Machine Learning"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650203.3663328","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3650203.3663328","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:03:31Z","timestamp":1750291411000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650203.3663328"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,9]]},"references-count":34,"alternative-id":["10.1145\/3650203.3663328","10.1145\/3650203"],"URL":"https:\/\/doi.org\/10.1145\/3650203.3663328","relation":{},"subject":[],"published":{"date-parts":[[2024,6,9]]},"assertion":[{"value":"2024-06-09","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}