{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T15:35:04Z","timestamp":1772120104460,"version":"3.50.1"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2023,12,6]],"date-time":"2023-12-06T00:00:00Z","timestamp":1701820800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,6]],"date-time":"2023-12-06T00:00:00Z","timestamp":1701820800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Soc. Netw. Anal. Min."],"DOI":"10.1007\/s13278-023-01159-9","type":"journal-article","created":{"date-parts":[[2023,12,5]],"date-time":"2023-12-05T21:01:36Z","timestamp":1701810096000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Multilingual, monolingual and mono-dialectal transfer learning for Moroccan Arabic sentiment classification"],"prefix":"10.1007","volume":"14","author":[{"given":"Naaima","family":"Boudad","sequence":"first","affiliation":[]},{"given":"Rdouan","family":"Faizi","sequence":"additional","affiliation":[]},{"given":"Rachid","family":"Oulad Haj Thami","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,6]]},"reference":[{"key":"1159_CR1","unstructured":"Abdaoui A, Berrimi M, Oussalah M, Moussaoui A (2021) Dziribert: a pre-trained language model for the algerian dialect. ArXiv Prepr. arXiv:2109.12346."},{"key":"1159_CR2","unstructured":"Abdelali A, Hassan S, Mubarak H, Darwish K, Samih Y (2021) Pre-training bert on arabic tweets: practical considerations. ArXiv Prepr.arXiv:2102.10684"},{"key":"1159_CR3","doi-asserted-by":"publisher","first-page":"1196","DOI":"10.11591\/eei.v12i2.3914","volume":"12","author":"MF Abdelfattah","year":"2023","unstructured":"Abdelfattah MF, Fakhr MW, Rizka MA (2023) ArSentBERT: fine-tuned bidirectional encoder representations from transformers model for Arabic sentiment classification. Bull Electr Eng Inform 12:1196\u20131202","journal-title":"Bull Electr Eng Inform"},{"key":"1159_CR4","doi-asserted-by":"crossref","unstructured":"Abdul-Mageed M, Elmadany A, Nagoudi EMB (2020) ARBERT & MARBERT: deep bidirectional transformers for Arabic. ArXiv Prepr. arXiv:2101.01785","DOI":"10.18653\/v1\/2021.acl-long.551"},{"key":"1159_CR5","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s40537-022-00625-z","volume":"9","author":"A Alduailej","year":"2022","unstructured":"Alduailej A, Alothaim A (2022) AraXLNet: pre-trained language model for sentiment analysis of Arabic. J Big Data 9:1\u201321","journal-title":"J Big Data"},{"key":"1159_CR6","doi-asserted-by":"publisher","first-page":"1048","DOI":"10.3390\/electronics12041048","volume":"12","author":"M Almaliki","year":"2023","unstructured":"Almaliki M, Almars AM, Gad I, Atlam E-S (2023) ABMM: Arabic BERT-mini model for hate-speech detection on social media. 
Electronics 12:1048","journal-title":"Electronics"},{"key":"1159_CR7","doi-asserted-by":"publisher","first-page":"615","DOI":"10.3390\/jcp1040031","volume":"1","author":"K Ameri","year":"2021","unstructured":"Ameri K, Hempel M, Sharif H, Lopez J Jr, Perumalla K (2021) CyBERT: cybersecurity claim classification by fine-tuning the BERT language model. J Cybersecurity Priv 1:615\u2013637","journal-title":"J Cybersecurity Priv"},{"key":"1159_CR8","first-page":"227","volume-title":"TunRoBERTa: a tunisian robustly optimized BERT approach model for sentiment analysis","author":"C Antit","year":"2022","unstructured":"Antit C, Mechti S, Faiz R (2022) TunRoBERTa: a tunisian robustly optimized BERT approach model for sentiment analysis. Atlantis Press, Netherlands, pp 227\u2013231"},{"key":"1159_CR9","unstructured":"Antoun W, Baly F, Hajj H (2020) Arabert: transformer-based model for arabic language understanding. ArXiv Prepr.arXiv:2003.00104"},{"key":"1159_CR10","first-page":"233","volume":"13","author":"N Boudad","year":"2017","unstructured":"Boudad N, Faizi R, Thami ROH, Chiheb R (2017) Sentiment classification of Arabic tweets: a supervised approach. J Mob Multimed 13:233\u2013243","journal-title":"J Mob Multimed"},{"key":"1159_CR11","doi-asserted-by":"crossref","unstructured":"Boudad N, Ezzahid S, Faizi R, Thami ROH (2019) Exploring the use of word embedding and deep learning in arabic sentiment analysis. In: Presented at the international conference on advanced intelligent systems for sustainable development, Springer pp 243\u2013253","DOI":"10.1007\/978-3-030-36674-2_26"},{"key":"1159_CR12","unstructured":"Boujou E, Chataoui H, Mekki AE, Benjelloun S, Chairi I, Berrada I (2021) An open access NLP dataset for Arabic dialects: data collection, labeling, and model construction. ArXiv Prepr. arXiv:2102.11000"},{"key":"1159_CR13","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N, Subbiah M, Kaplan JD, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A (2020) Language models are few-shot learners. Adv Neural Inf Process Syst 33:1877\u20131901","journal-title":"Adv Neural Inf Process Syst"},{"key":"1159_CR14","unstructured":"Clark K, Luong M-T, Le QV, Manning CD (2020) Electra: pre-training text encoders as discriminators rather than generators. ArXiv Prepr. arXiv:2003.10555"},{"key":"1159_CR15","doi-asserted-by":"crossref","unstructured":"Conneau A, Khandelwal K, Goyal N, Chaudhary V, Wenzek G, Guzm\u00e1n F, Grave E, Ott M, Zettlemoyer L, Stoyanov V (2019) Unsupervised cross-lingual representation learning at scale. ArXiv Prepr. arXiv:1911.02116","DOI":"10.18653\/v1\/2020.acl-main.747"},{"key":"1159_CR16","unstructured":"de Vries W, van Cranenburgh A, Bisazza A, Caselli T, van Noord G, Nissim M (2019). Bertje: a dutch bert model. ArXiv Prepr.arXiv:1912.09582"},{"key":"1159_CR17","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. ArXiv Prepr.arXiv:1810.04805 ArXiv."},{"key":"1159_CR18","unstructured":"Dodge J, Ilharco G, Schwartz R, Farhadi A, Hajishirzi H, Smith N (2020) Fine-tuning pretrained language models: weight initializations, data orders, and early stopping. 
ArXiv Prepr.arXiv:2002.06305"},{"key":"1159_CR19","first-page":"262","volume-title":"Collecting and processing arabic facebook comments for sentiment analysis","author":"A Elouardighi","year":"2017","unstructured":"Elouardighi A, Maghfour M, Hammia H (2017) Collecting and processing arabic facebook comments for sentiment analysis. Springer, Berlin, pp 262\u2013274"},{"key":"1159_CR20","doi-asserted-by":"crossref","unstructured":"Garouani M, Kharroubi J (2021) MAC: an open and free Moroccan Arabic corpus for sentiment analysis. In: Presented at the the proceedings of the international conference on smart city applications, Springer, pp. 849\u2013858.","DOI":"10.1007\/978-3-030-94191-8_68"},{"key":"1159_CR21","doi-asserted-by":"crossref","unstructured":"Garouani, M., Chrita, H., Kharroubi, J., 2021. Sentiment analysis of Moroccan tweets using text mining.","DOI":"10.1007\/978-3-030-73882-2_54"},{"key":"1159_CR22","unstructured":"Ghaddar A, Wu Y, Rashid A, Bibi K, Rezagholizadeh M, Xing C, Wang Y, Xinyu D, Wang Z, Huai B (2021) JABER: junior Arabic BERt. ArXiv Prepr.arXiv:2112.04329 ArXiv."},{"key":"1159_CR23","unstructured":"Inoue G, Alhafni B, Baimukan N, Bouamor H, Habash N (2021) The interplay of variant, size, and task type in Arabic pre-trained language models. ArXiv Prepr. arXiv:2103.06678"},{"key":"1159_CR24","doi-asserted-by":"crossref","unstructured":"Lan W, Chen Y, Xu W, Ritter A (2020) An empirical study of pre-trained transformers for Arabic information extraction. ArXiv Prepr. arXiv:2004.14519","DOI":"10.18653\/v1\/2020.emnlp-main.382"},{"key":"1159_CR25","doi-asserted-by":"crossref","unstructured":"Lewis M, Liu Y, Goyal N, Ghazvininejad M, Mohamed A, Levy O, Stoyanov V, Zettlemoyer L (2019) Bart: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. ArXiv Prepr. arXiv:1910.13461","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"1159_CR26","unstructured":"Liu Y, Ott M, Goyal N, Du J, Joshi M, Chen D, Levy O, Lewis M, Zettlemoyer L, Stoyanov V (2019) Roberta: a robustly optimized bert pretraining approach. ArXiv Prepr.arXiv:1907.11692"},{"key":"1159_CR27","doi-asserted-by":"crossref","unstructured":"Martin L, Muller B, Su\u00e1rez PJO, Dupont Y, Romary L, de La Clergerie \u00c9V, Seddah D, Sagot B (2019) CamemBERT: a tasty French language model. ArXiv Prepr. arXiv:1911.03894","DOI":"10.18653\/v1\/2020.acl-main.645"},{"key":"1159_CR28","doi-asserted-by":"crossref","unstructured":"Messaoudi A, Cheikhrouhou A, Haddad H, Ferchichi N, BenHajhmida M, Korched A, Naski M, Ghriss F, Kerkeni A (2021) TunBERT: pretrained contextualized text representation for tunisian dialect. ArXiv Prepr.arXiv:2111.13138","DOI":"10.1007\/978-3-031-08277-1_23"},{"key":"1159_CR29","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1007\/s13278-022-01009-0","volume":"13","author":"O Mohamed","year":"2022","unstructured":"Mohamed O, Kassem AM, Ashraf A, Jamal S, Mohamed EH (2022) An ensemble transformer-based model for Arabic sentiment analysis. Soc Netw Anal Min 13:11","journal-title":"Soc Netw Anal Min"},{"key":"1159_CR30","doi-asserted-by":"publisher","first-page":"544","DOI":"10.1177\/0165551519849516","volume":"46","author":"A Oussous","year":"2020","unstructured":"Oussous A, Benjelloun F-Z, Lahcen AA, Belfkih S (2020) ASA: a framework for Arabic sentiment analysis. 
J Inf Sci 46:544\u2013559","journal-title":"J Inf Sci"},{"key":"1159_CR31","doi-asserted-by":"crossref","unstructured":"Pires T, Schlinger E, Garrette D (2019) How multilingual is multilingual BERT? ArXiv Prepr. arXiv:1906.01502ArXiv.","DOI":"10.18653\/v1\/P19-1493"},{"key":"1159_CR32","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford A, Wu J, Child R, Luan D, Amodei D, Sutskever I (2019) Language models are unsupervised multitask learners. OpenAI Blog 1:9","journal-title":"OpenAI Blog"},{"key":"1159_CR33","doi-asserted-by":"crossref","unstructured":"Safaya A, Abdullatif M, Yuret D (2020) Kuisail at semeval-2020 task 12: Bert-cnn for offensive speech identification in social media. pp. 2054\u20132059","DOI":"10.18653\/v1\/2020.semeval-1.271"},{"key":"1159_CR34","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Adv Neural Inf Process Syst 30."}],"container-title":["Social Network Analysis and Mining"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13278-023-01159-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13278-023-01159-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13278-023-01159-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,25]],"date-time":"2025-02-25T00:35:04Z","timestamp":1740443704000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13278-023-01159-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,6]]},"references-count":34,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2024,12]]}},"alternative-id":["1159"],"URL":"https:\/\/doi.org\/10.1007\/s13278-023-01159-9","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-3167222\/v1","asserted-by":"object"}]},"ISSN":["1869-5469"],"issn-type":[{"value":"1869-5469","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,12,6]]},"assertion":[{"value":"13 July 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 October 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 October 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 December 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"3"}}
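
The record above is a work object from the public Crossref REST API (api.crossref.org). As a minimal sketch, not part of the article itself, the snippet below re-fetches the same record and reads a few of its fields; it assumes network access, and every field name used ("message", "title", "author", "issued", "DOI", "reference-count", "is-referenced-by-count") appears in the record above.

# Minimal sketch: fetch this Crossref work record and print a one-line citation.
import json
import urllib.request

DOI = "10.1007/s13278-023-01159-9"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]   # the payload is nested under "message"

title = work["title"][0]                # "title" is a one-element list
authors = ", ".join(
    f'{a.get("given", "")} {a["family"]}'.strip() for a in work.get("author", [])
)
year = work["issued"]["date-parts"][0][0]

print(f'{authors} ({year}). "{title}". https://doi.org/{work["DOI"]}')
print(f'References: {work.get("reference-count", 0)}; cited by: {work.get("is-referenced-by-count", 0)}')

Run against this DOI, the sketch would print the three authors, the 2023 publication year, the article title, and the 34-reference count recorded above.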