{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T14:41:02Z","timestamp":1774449662242,"version":"3.50.1"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3128742","type":"journal-article","created":{"date-parts":[[2021,11,16]],"date-time":"2021-11-16T20:31:06Z","timestamp":1637094666000},"page":"154704-154716","source":"Crossref","is-referenced-by-count":55,"title":["BERT, XLNet or RoBERTa: The Best Transfer Learning Model to Detect Clickbaits"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6747-6367","authenticated-orcid":false,"given":"Praboda","family":"Rajapaksha","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3219-3700","authenticated-orcid":false,"given":"Reza","family":"Farahbakhsh","sequence":"additional","affiliation":[]},{"given":"Noel","family":"Crespi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1186\/s12864-019-6413-7"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W19-4828"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-5022"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.repl4nlp-1.18"},{"key":"ref31","article-title":"Compressing large-scale transformer-based models: A case study on BERT","author":"ganesh","year":"2020","journal-title":"arXiv 2002 11985"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.coling-main.558"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1613\/jair.953"},{"key":"ref36","article-title":"BERTs of a feather do not generalize together: Large variability in generalization across models with similar test set performance","author":"thomas mccoy","year":"2019","journal-title":"arXiv 1911 02969"},{"key":"ref35","article-title":"Learning and evaluating general linguistic intelligence","author":"yogatama","year":"2019","journal-title":"arXiv 1901 11373"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00342"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref40","article-title":"Clickbait detection in tweets using self-attentive network","author":"zhou","year":"2017","journal-title":"arXiv 1710 05364"},{"key":"ref11","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref12","year":"2021","journal-title":"The General Language Understanding Evaluation (GLUE) Benchmark"},{"key":"ref13","article-title":"Xlnet: Generalized autoregressive pretraining for language understanding","volume":"32","author":"yang","year":"2019","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref14","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"liu","year":"2019","journal-title":"arXiv 1907 11692"},{"key":"ref15","year":"2021","journal-title":"Webis Clickbait Challenge"},{"key":"ref16","year":"2021","journal-title":"Clickbait News Detection"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ASONAM.2016.7752207"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3110025.3110054"},{"key":"ref19","article-title":"Characterizing clickbaits on Instagram","volume":"12","author":"ha","year":"2018","journal-title":"Proc Int AAAI Conf Web Social Media"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3390\/app9194062"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-30671-1_72"},{"key":"ref27","article-title":"Defending against neural fake news","author":"zellers","year":"2019","journal-title":"arXiv 1905 12616"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ASONAM.2018.8508534"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3390\/sym10050138"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICIoT48696.2020.9089487"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210144"},{"key":"ref8","article-title":"Using neural network for identifying clickbaits in online news media","author":"omidvar","year":"2018","journal-title":"Proc Annu Int Symp Inf Manage Big Data"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-16145-3_5"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2902491"},{"key":"ref9","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"arXiv 1810 04805"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-56608-5_46"},{"key":"ref20","article-title":"Fishing for clickbaits in social images and texts with linguistically-infused neural network models","author":"glenski","year":"2017","journal-title":"arXiv 1710 06390"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref21","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32381-3_16"},{"key":"ref41","article-title":"Reducing transformer depth on demand with structured dropout","author":"fan","year":"2019","journal-title":"arXiv 1909 11556"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1441"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-37429-7_36"},{"key":"ref25","article-title":"Multi-task bidirectional transformer representations for irony detection","author":"zhang","year":"2019","journal-title":"arXiv 1909 03526"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09617586.pdf?arnumber=9617586","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,24]],"date-time":"2022-01-24T20:56:08Z","timestamp":1643057768000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9617586\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3128742","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}