{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,8]],"date-time":"2026-01-08T20:55:34Z","timestamp":1767905734141,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":17,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,5,8]]},"DOI":"10.1145\/3701716.3715567","type":"proceedings-article","created":{"date-parts":[[2025,6,23]],"date-time":"2025-06-23T14:10:32Z","timestamp":1750687832000},"page":"1005-1008","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Cross-Lingual Text Classification with Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5280-9456","authenticated-orcid":false,"given":"Bin","family":"Han","sequence":"first","affiliation":[{"name":"University of Washington, Seattle, Washington, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3239-9645","authenticated-orcid":false,"given":"Sean T.","family":"Yang","sequence":"additional","affiliation":[{"name":"Yahoo Research, Sunnyvale, California, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2567-1305","authenticated-orcid":false,"given":"Christopher","family":"LuVogt","sequence":"additional","affiliation":[{"name":"Yahoo Research, Sunnyvale, California, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,5,23]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"crossref","unstructured":"Mikel Artetxe Vedanuj Goswami Shruti Bhosale Angela Fan and Luke Zettlemoyer. 2023. Revisiting Machine Translation for Cross-lingual Classification. In EMNLP.","DOI":"10.18653\/v1\/2023.emnlp-main.399"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"crossref","unstructured":"Ilias Chalkidis Manos Fergadiotis and Ion Androutsopoulos. 2021. MultiEURLEX - A multi-lingual and multi-label legal document classification dataset for zero-shot cross-lingual transfer. In EMNLP.","DOI":"10.18653\/v1\/2021.emnlp-main.559"},{"key":"e_1_3_2_1_4_1","volume-title":"Cross-lingual language model pretraining. NeuRIPs","author":"Conneau Alexis","year":"2019","unstructured":"Alexis Conneau and Guillaume Lample. 2019. Cross-lingual language model pretraining. NeuRIPs (2019)."},{"key":"e_1_3_2_1_5_1","volume-title":"XNLI: Evaluating Cross-lingual Sentence Representations. In EMNLP. https:\/\/aclanthology.org\/D18--1269","author":"Conneau Alexis","year":"2018","unstructured":"Alexis Conneau, Ruty Rinott, Guillaume Lample, Adina Williams, Samuel Bowman, Holger Schwenk, and Veselin Stoyanov. 2018. XNLI: Evaluating Cross-lingual Sentence Representations. In EMNLP. https:\/\/aclanthology.org\/D18--1269"},{"key":"e_1_3_2_1_6_1","volume-title":"Qlora: Efficient finetuning of quantized llms.","author":"Dettmers Tim","year":"2024","unstructured":"Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2024. Qlora: Efficient finetuning of quantized llms. NeuRIPs (2024)."},{"key":"e_1_3_2_1_7_1","volume-title":"Lora: Low-rank adaptation of large language models. In ICLR.","author":"Hu Edward J","year":"2021","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. In ICLR."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"crossref","unstructured":"Phillip Keung Yichao Lu Gy\u00f6rgy Szarvas and Noah A Smith. 2020. The Multilingual Amazon Reviews Corpus. In EMNLP.","DOI":"10.18653\/v1\/2020.emnlp-main.369"},{"key":"e_1_3_2_1_9_1","unstructured":"Yinhan Liu Jiatao Gu Naman Goyal Xian Li Sergey Edunov Marjan Ghazvininejad Mike Lewis and Luke Zettlemoyer. [n. d.]. Multilingual Denoising Pre-training for Neural Machine Translation. Transactions of the Association for Computational Linguistics ( [n. d.]). https:\/\/aclanthology.org\/2020.tacl-1.47"},{"key":"e_1_3_2_1_10_1","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Ray et al. 2022. Training language models to follow instructions with human feedback. NeuRIPs (2022)."},{"key":"e_1_3_2_1_11_1","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans Ilya Sutskever et al. 2018. Improving language understanding by generative pre-training. (2018)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"crossref","unstructured":"Giridhar Kaushik Ramachandran Yujuan Fu Bin Han Kevin Lybarger Nic Dobbins Ozlem Uzuner and Meliha Yetisgen-Yildiz. 2023. Prompt-based Extraction of Social Determinants of Health Using Few-shot Learning. In ClinicalNLP. 385--393.","DOI":"10.18653\/v1\/2023.clinicalnlp-1.41"},{"key":"e_1_3_2_1_13_1","volume-title":"Llama: Open and efficient foundation language models. arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_14_1","volume-title":"T3l: Translate-and-test transfer learning for cross-lingual text classification. Transactions of the Association for Computational Linguistics","author":"Unanue Inigo Jauregi","year":"2023","unstructured":"Inigo Jauregi Unanue, Gholamreza Haffari, and Massimo Piccardi. 2023. T3l: Translate-and-test transfer learning for cross-lingual text classification. Transactions of the Association for Computational Linguistics (2023)."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Robert Wolfe Isaac Slaughter Bin Han Bingbing Wen Yiwei Yang Lucas Rosenblatt Bernease Herman Eva Brown Zening Qu Nic Weber et al. 2024. Laboratory-Scale AI: Open-Weight Models are Competitive with ChatGPT Even in Low-Resource Settings. In FAccT. 1199--1210.","DOI":"10.1145\/3630106.3658966"},{"key":"e_1_3_2_1_16_1","unstructured":"Biao Zhang Barry Haddow and Alexandra Birch. 2023. Prompting large language model for machine translation: A case study. In ICML. PMLR."},{"key":"e_1_3_2_1_17_1","volume-title":"Multilingual machine translation with large language models: Empirical results and analysis. arXiv preprint arXiv:2304.04675","author":"Zhu Wenhao","year":"2023","unstructured":"Wenhao Zhu, Hongyi Liu, Qingxiu Dong, Jingjing Xu, Shujian Huang, Lingpeng Kong, Jiajun Chen, and Lei Li. 2023. Multilingual machine translation with large language models: Empirical results and analysis. arXiv preprint arXiv:2304.04675 (2023)."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3701716.3715567","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T18:22:38Z","timestamp":1759861358000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3715567"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":17,"alternative-id":["10.1145\/3701716.3715567","10.1145\/3701716"],"URL":"https:\/\/doi.org\/10.1145\/3701716.3715567","relation":{},"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"2025-05-23","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}