{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T12:30:56Z","timestamp":1772022656796,"version":"3.50.1"},"publisher-location":"Cham","reference-count":36,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030969561","type":"print"},{"value":"9783030969578","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-030-96957-8_33","type":"book-chapter","created":{"date-parts":[[2022,2,22]],"date-time":"2022-02-22T14:04:35Z","timestamp":1645538675000},"page":"381-392","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Testing the Generalization of Neural Language Models for COVID-19 Misinformation Detection"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2116-9767","authenticated-orcid":false,"given":"Jan 
Philip","family":"Wahle","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7022-5948","authenticated-orcid":false,"given":"Nischal","family":"Ashok","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9440-780X","authenticated-orcid":false,"given":"Terry","family":"Ruas","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4648-8198","authenticated-orcid":false,"given":"Norman","family":"Meuschke","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2358-522X","authenticated-orcid":false,"given":"Tirthankar","family":"Ghosal","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6522-3019","authenticated-orcid":false,"given":"Bela","family":"Gipp","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,2,23]]},"reference":[{"key":"33_CR1","unstructured":"Alsentzer, E., et al.: Publicly Available Clinical BERT Embeddings. arXiv:1904.03323 [cs], June 2019. http:\/\/arxiv.org\/abs\/1904.03323"},{"key":"33_CR2","doi-asserted-by":"crossref","unstructured":"Beltagy, I., Lo, K., Cohan, A.: SciBERT: a pretrained language model for scientific text. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3613\u20133618. Association for Computational Linguistics, Hong Kong, China (2019). 10\/ggcgtm","DOI":"10.18653\/v1\/D19-1371"},{"key":"33_CR3","doi-asserted-by":"publisher","unstructured":"Benkler, Y., Farris, R., Roberts, H.: Network Propaganda, vol. 1. Oxford University Press, October 2018. https:\/\/doi.org\/10.1093\/oso\/9780190923624.001.0001","DOI":"10.1093\/oso\/9780190923624.001.0001"},{"key":"33_CR4","doi-asserted-by":"crossref","unstructured":"Bojanowski, P., Grave, E., Joulin, A., Mikolov, T.: Enriching word vectors with subword information. Trans. 
Assoc. Comput. Linguist. 5, 135\u2013146 (2017). 10\/gfw9cs","DOI":"10.1162\/tacl_a_00051"},{"issue":"1","key":"33_CR5","doi-asserted-by":"publisher","first-page":"16598","DOI":"10.1038\/s41598-020-73510-5","volume":"10","author":"M Cinelli","year":"2020","unstructured":"Cinelli, M., et al.: The COVID-19 social media infodemic. Sci. Rep. 10(1), 16598 (2020). https:\/\/doi.org\/10.1038\/s41598-020-73510-5","journal-title":"Sci. Rep."},{"key":"33_CR6","unstructured":"Clark, K., Luong, M.T., Le, Q.V., Manning, C.D.: ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators. arXiv:2003.10555 [cs], March 2020. http:\/\/arxiv.org\/abs\/2003.10555"},{"key":"33_CR7","unstructured":"Cui, L., Lee, D.: CoAID: COVID-19 Healthcare Misinformation Dataset. arXiv:2006.00885 [cs], August 2020. http:\/\/arxiv.org\/abs\/2006.00885"},{"key":"33_CR8","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv:1810.04805, May 2019. http:\/\/arxiv.org\/abs\/1810.04805"},{"key":"33_CR9","doi-asserted-by":"publisher","unstructured":"Dror, R., Baumer, G., Shlomov, S., Reichart, R.: The hitchhiker\u2019s guide to testing statistical significance in natural language processing. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1383\u20131392. Association for Computational Linguistics, Melbourne, Australia, July 2018. https:\/\/doi.org\/10.18653\/v1\/P18-1128","DOI":"10.18653\/v1\/P18-1128"},{"issue":"4","key":"33_CR10","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/s41562-021-01079-8","volume":"5","author":"T Hale","year":"2021","unstructured":"Hale, T., et al.: A global panel database of pandemic policies (oxford COVID-19 government response tracker). Nat. Hum. Behav. 5(4), 529\u2013538 (2021). https:\/\/doi.org\/10.1038\/s41562-021-01079-8","journal-title":"Nat. Hum. 
Behav."},{"key":"33_CR11","unstructured":"He, P., Liu, X., Gao, J., Chen, W.: DeBERTa: Decoding-enhanced BERT with Disentangled Attention. arXiv:2006.03654 [cs], January 2021. http:\/\/arxiv.org\/abs\/2006.03654"},{"key":"33_CR12","doi-asserted-by":"publisher","unstructured":"Howard, J., Ruder, S.: Universal language model fine-tuning for text classification. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 328\u2013339. Association for Computational Linguistics, Melbourne, Australia, July 2018. https:\/\/doi.org\/10.18653\/v1\/P18-1031","DOI":"10.18653\/v1\/P18-1031"},{"key":"33_CR13","doi-asserted-by":"publisher","unstructured":"Johnson, A.E., et al.: MIMIC-III, a freely accessible critical care database. Sci. Data 3, 160035 (2016). https:\/\/doi.org\/10.1038\/sdata.2016.35","DOI":"10.1038\/sdata.2016.35"},{"key":"33_CR14","doi-asserted-by":"publisher","unstructured":"Lee, J., et al.: BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics, pp. 1\u20137 (2019). https:\/\/doi.org\/10.1093\/bioinformatics\/btz682","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"33_CR15","doi-asserted-by":"publisher","unstructured":"Lewis, M., et al.: BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7871\u20137880. Association for Computational Linguistics, Online, July 2020. https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.703","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"33_CR16","unstructured":"Liu, Y., et al.: RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv:1907.11692 [cs], July 2019. http:\/\/arxiv.org\/abs\/1907.11692"},{"key":"33_CR17","unstructured":"Memon, S.A., Carley, K.M.: Characterizing COVID-19 Misinformation Communities Using a Novel Twitter Dataset. 
arXiv:2008.00791 [cs], September 2020. http:\/\/arxiv.org\/abs\/2008.00791"},{"key":"33_CR18","unstructured":"Mikolov, T., Sutskever, I., Chen, K., Corrado, G., Dean, J.: Distributed Representations of Words and Phrases and their Compositionality. arXiv:1310.4546 [cs, stat], October 2013. http:\/\/arxiv.org\/abs\/1310.4546"},{"key":"33_CR19","doi-asserted-by":"publisher","unstructured":"Mutlu, E.C., et al.: A stance data set on polarized conversations on Twitter about the efficacy of hydroxychloroquine as a treatment for COVID-19. Data in Brief 33, 106401 (2020). https:\/\/doi.org\/10.1016\/j.dib.2020.106401","DOI":"10.1016\/j.dib.2020.106401"},{"key":"33_CR20","unstructured":"M\u00fcller, M., Salath\u00e9, M., Kummervold, P.E.: COVID-twitter-bert: a natural language processing model to analyse COVID-19 content on twitter. arXiv:2005.07503 [cs], May 2020. http:\/\/arxiv.org\/abs\/2005.07503"},{"key":"33_CR21","doi-asserted-by":"publisher","unstructured":"Nguyen, D.Q., Vu, T., Tuan Nguyen, A.: BERTweet: a pre-trained language model for English tweets. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 9\u201314. Association for Computational Linguistics, Online (2020). https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-demos.2","DOI":"10.18653\/v1\/2020.emnlp-demos.2"},{"key":"33_CR22","doi-asserted-by":"publisher","unstructured":"Ostendorff, M., Ruas, T., Blume, T., Gipp, B., Rehm, G.: Aspect-based document similarity for research papers. In: Proceedings of the 28th International Conference on Computational Linguistics, pp. 6194\u20136206. International Committee on Computational Linguistics, Barcelona, Spain (Online) (2020). 
https:\/\/doi.org\/10.18653\/v1\/2020.coling-main.545","DOI":"10.18653\/v1\/2020.coling-main.545"},{"issue":"7","key":"33_CR23","doi-asserted-by":"publisher","first-page":"770","DOI":"10.1177\/0956797620939054","volume":"31","author":"G Pennycook","year":"2020","unstructured":"Pennycook, G., McPhetres, J., Zhang, Y., Lu, J.G., Rand, D.G.: Fighting COVID-19 misinformation on social media: experimental evidence for a scalable accuracy-nudge intervention. Psychol. Sci. 31(7), 770\u2013780 (2020). https:\/\/doi.org\/10.1177\/0956797620939054","journal-title":"Psychol. Sci."},{"key":"33_CR24","unstructured":"Press, O., Smith, N.A., Lewis, M.: Shortformer: better language modeling using shorter inputs. arXiv:2012.15832 [cs], December 2020. http:\/\/arxiv.org\/abs\/2012.15832"},{"key":"33_CR25","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1016\/j.ins.2020.04.048","volume":"532","author":"T Ruas","year":"2020","unstructured":"Ruas, T., Ferreira, C.H.P., Grosky, W., de Fran\u00e7a, F.O., de Medeiros, D.M.R.: Enhanced word embeddings using multi-semantic representation through lexical chains. Inf. Sci. 532, 16\u201332 (2020). https:\/\/doi.org\/10.1016\/j.ins.2020.04.048","journal-title":"Inf. Sci."},{"key":"33_CR26","doi-asserted-by":"publisher","first-page":"288","DOI":"10.1016\/j.eswa.2019.06.026","volume":"136","author":"T Ruas","year":"2019","unstructured":"Ruas, T., Grosky, W., Aizawa, A.: Multi-sense embeddings through a word sense disambiguation process. Expert Syst. Appl. 136, 288\u2013303 (2019). https:\/\/doi.org\/10.1016\/j.eswa.2019.06.026","journal-title":"Expert Syst. Appl."},{"issue":"1","key":"33_CR27","doi-asserted-by":"publisher","first-page":"22","DOI":"10.1145\/3137597.3137600","volume":"19","author":"K Shu","year":"2017","unstructured":"Shu, K., Sliva, A., Wang, S., Tang, J., Liu, H.: Fake news detection on social media: a data mining perspective. ACM SIGKDD Explor. Newslett. 19(1), 22\u201336 (2017). 
https:\/\/doi.org\/10.1145\/3137597.3137600","journal-title":"ACM SIGKDD Explor. Newslett."},{"key":"33_CR28","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of the 31st International Conference on Neural Information Processing Systems, pp. 6000\u20136010. NIPS 2017, Curran Associates Inc., Red Hook, NY, USA (2017). https:\/\/arxiv.org\/abs\/1706.03762"},{"key":"33_CR29","doi-asserted-by":"crossref","unstructured":"Wahle, J.P., Ruas, T., Foltynek, T., Meuschke, N., Gipp, B.: Identifying machine-paraphrased plagiarism. In: Proceedings of the iConference, February 2022","DOI":"10.1007\/978-3-030-96957-8_34"},{"key":"33_CR30","doi-asserted-by":"crossref","unstructured":"Wahle, J.P., Ruas, T., Meuschke, N., Gipp, B.: Are neural language models good plagiarists? a benchmark for neural paraphrase detection. In: Proceedings of the ACM\/IEEE Joint Conference on Digital Libraries (JCDL). IEEE, Washington, USA, September 2021","DOI":"10.1109\/JCDL52503.2021.00065"},{"key":"33_CR31","unstructured":"Wang, A., et al.: SuperGLUE: a stickier benchmark for general-purpose language understanding systems. In: Wallach, H., Larochelle, H., Beygelzimer, A., d\u2019 Alch\u00e9-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems 32, pp. 3266\u20133280. Curran Associates, Inc. (2019). https:\/\/arxiv.org\/abs\/1905.00537"},{"key":"33_CR32","doi-asserted-by":"crossref","unstructured":"Wang, A., Singh, A., Michael, J., Hill, F., Levy, O., Bowman, S.R.: GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding. arXiv:1804.07461 [cs], February 2019. https:\/\/arxiv.org\/abs\/1804.07461","DOI":"10.18653\/v1\/W18-5446"},{"key":"33_CR33","unstructured":"Wang, L.L., et al.: CORD-19: The COVID-19 Open Research Dataset. arXiv:2004.10706 [cs], July 2020. 
http:\/\/arxiv.org\/abs\/2004.10706"},{"key":"33_CR34","unstructured":"Yang, Z., Dai, Z., Yang, Y., Carbonell, J., Salakhutdinov, R., Le, Q.V.: XLNet: generalized autoregressive pretraining for language understanding. arXiv:1906.08237 [cs], June 2019. https:\/\/arxiv.org\/abs\/1906.08237"},{"issue":"10225","key":"33_CR35","doi-asserted-by":"publisher","first-page":"676","DOI":"10.1016\/S0140-6736(20)30461-X","volume":"395","author":"J Zarocostas","year":"2020","unstructured":"Zarocostas, J.: How to fight an infodemic. Lancet 395(10225), 676 (2020). https:\/\/doi.org\/10.1016\/S0140-6736(20)30461-X","journal-title":"Lancet"},{"key":"33_CR36","doi-asserted-by":"publisher","unstructured":"Zhou, X., Mulay, A., Ferrara, E., Zafarani, R.: ReCOVery: A multimodal repository for COVID-19 news credibility research, pp. 3205\u20133212. Association for Computing Machinery, New York, NY, USA (2020). https:\/\/doi.org\/10.1145\/3340531.3412880","DOI":"10.1145\/3340531.3412880"}],"container-title":["Lecture Notes in Computer Science","Information for a Better World: Shaping the Global Future"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-96957-8_33","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,2,24]],"date-time":"2022-02-24T00:07:50Z","timestamp":1645661270000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-96957-8_33"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783030969561","9783030969578"],"references-count":36,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-96957-8_33","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"23 February 2022","order":1,"name":"first_online","label":"First 
Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"iConference","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Information","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 February 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 March 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconference2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ischools.org\/iConference","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"147","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"29","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"22% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}