{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,10]],"date-time":"2025-06-10T14:07:35Z","timestamp":1749564455038,"version":"3.40.3"},"publisher-location":"Cham","reference-count":25,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031533013"},{"type":"electronic","value":"9783031533020"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-53302-0_10","type":"book-chapter","created":{"date-parts":[[2024,1,28]],"date-time":"2024-01-28T09:02:09Z","timestamp":1706432529000},"page":"132-144","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Ookpik- A Collection of\u00a0Out-of-Context Image-Caption Pairs"],"prefix":"10.1007","author":[{"given":"Kha-Luan","family":"Pham","sequence":"first","affiliation":[]},{"given":"Minh-Khoi","family":"Nguyen-Nhat","sequence":"additional","affiliation":[]},{"given":"Anh-Huy","family":"Dinh","sequence":"additional","affiliation":[]},{"given":"Quang-Tri","family":"Le","sequence":"additional","affiliation":[]},{"given":"Manh-Thien","family":"Nguyen","sequence":"additional","affiliation":[]},{"given":"Anh-Duy","family":"Tran","sequence":"additional","affiliation":[]},{"given":"Minh-Triet","family":"Tran","sequence":"additional","affiliation":[]},{"given":"Duc-Tien","family":"Dang-Nguyen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,29]]},"reference":[{"key":"10_CR1","doi-asserted-by":"publisher","unstructured":"Akgul, T., Civelek, T.E., Ugur, D., Begen, A.C.: Cosmos on steroids: a cheap detector for cheapfakes. In: Proceedings of the 12th ACM Multimedia Systems Conference, MMSys 2021, pp. 327\u2013331. Association for Computing Machinery, New York, NY, USA (2021). https:\/\/doi.org\/10.1145\/3458305.3479968","DOI":"10.1145\/3458305.3479968"},{"key":"10_CR2","unstructured":"Aneja, S., Bregler, C., Nie\u00dfner, M.: COSMOS: catching out-of-context misinformation with self-supervised learning. In: ArXiv preprint arXiv:2101.06278 (2021), https:\/\/arxiv.org\/pdf\/2101.06278.pdf"},{"key":"10_CR3","unstructured":"Aneja, S., et al.: ACM multimedia grand challenge on detecting cheapfakes. arXiv preprint arXiv:2207.14534 (2022)"},{"key":"10_CR4","unstructured":"Brennen, J.S., Simon, F.M., Howard, P.N., Nielsen, R.K.: Types, sources, and claims of COVID-19 misinformation. Ph.D. thesis, University of Oxford (2020)"},{"key":"10_CR5","unstructured":"Britt Paris, J.D.: Deepfakes and cheap fakes the manipulation of audio and visual evidence (2019). https:\/\/datasociety.net\/wp-content\/uploads\/2019\/09\/DS_Deepfakes_Cheap_FakesFinal-1-1.pdf"},{"key":"10_CR6","doi-asserted-by":"crossref","unstructured":"Jaiswal, A., Sabir, E., AbdAlmageed, W., Natarajan, P.: Multimedia semantic integrity assessment using joint embedding of images and text. In: Proceedings of the 25th ACM International Conference on Multimedia, pp. 1465\u20131471 (2017)","DOI":"10.1145\/3123266.3123385"},{"key":"10_CR7","doi-asserted-by":"publisher","first-page":"6748","DOI":"10.1109\/ACCESS.2023.3236993","volume":"11","author":"SA Khan","year":"2023","unstructured":"Khan, S.A., et al.: Visual user-generated content verification in journalism: an overview. IEEE Access 11, 6748\u20136769 (2023)","journal-title":"IEEE Access"},{"key":"10_CR8","doi-asserted-by":"publisher","unstructured":"La, T.V., Tran, Q.T., Tran, T.P., Tran, A.D., Dang-Nguyen, D.T., Dao, M.S.: Multimodal cheapfakes detection by utilizing image captioning for global context. In: Proceedings of the 3rd ACM Workshop on Intelligent Cross-Data Analysis and Retrieval, ICDAR 2022, pp. 9\u201316. Association for Computing Machinery, New York, NY, USA (2022). https:\/\/doi.org\/10.1145\/3512731.3534210","DOI":"10.1145\/3512731.3534210"},{"key":"10_CR9","doi-asserted-by":"crossref","unstructured":"Liu, F., Wang, Y., Wang, T., Ordonez, V.: Visual news: benchmark and challenges in news image captioning. arXiv preprint arXiv:2010.03743 (2020)","DOI":"10.18653\/v1\/2021.emnlp-main.542"},{"key":"10_CR10","doi-asserted-by":"publisher","unstructured":"Luo, G., Darrell, T., Rohrbach, A.: Newsclippings: automatic generation of out-of-context multimodal media (2021). https:\/\/doi.org\/10.48550\/ARXIV.2104.05893, https:\/\/arxiv.org\/abs\/2104.05893","DOI":"10.48550\/ARXIV.2104.05893"},{"key":"10_CR11","doi-asserted-by":"crossref","unstructured":"M\u00fcller-Budack, E., Theiner, J., Diering, S., Idahl, M., Ewerth, R.: Multimodal analytics for real-world news using measures of cross-modal entity consistency. In: Proceedings of the 2020 International Conference on Multimedia Retrieval, pp. 16\u201325 (2020)","DOI":"10.1145\/3372278.3390670"},{"key":"10_CR12","doi-asserted-by":"publisher","first-page":"102182","DOI":"10.1016\/j.datak.2023.102182","volume":"146","author":"AL Opdahl","year":"2023","unstructured":"Opdahl, A.L., et al.: Trustworthy journalism through AI. Data Knowl. Eng. 146, 102182 (2023)","journal-title":"Data Knowl. Eng."},{"key":"10_CR13","unstructured":"OpenAI: Introducing ChatGPT. https:\/\/openai.com\/blog\/chatgpt (2021). Accessed 08 Aug 2023"},{"key":"10_CR14","doi-asserted-by":"publisher","unstructured":"Pham, K.L., Nguyen, M.T., Tran, A.D., Dao, M.S., Dang-Nguyen, D.T.: Detecting cheapfakes using self-query adaptive-context learning. In: Proceedings of the 4th ACM Workshop on Intelligent Cross-Data Analysis and Retrieval, ICDAR 2023, pp. 60\u201363. Association for Computing Machinery, New York, NY, USA (2023). https:\/\/doi.org\/10.1145\/3592571.3592972","DOI":"10.1145\/3592571.3592972"},{"key":"10_CR15","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"10_CR16","doi-asserted-by":"publisher","unstructured":"Rocha, Y.M., de Moura, G.A., Desid\u00e9rio, G.A., de Oliveira, C.H., Louren\u00e7o, F.D., de Figueiredo Nicolete, L.D.: The impact of fake news on social media and its influence on health during the COVID-19 pandemic: a systematic review. J. Public Health, pp. 1\u201310 (2021). https:\/\/doi.org\/10.1007\/s10389-021-01658-z","DOI":"10.1007\/s10389-021-01658-z"},{"key":"10_CR17","doi-asserted-by":"publisher","unstructured":"Roozenbeek, J., et al.: Susceptibility to misinformation about covid-19 around the world. R. Soc. Open Sci. 7(10), 201199 (2020). https:\/\/doi.org\/10.1098\/rsos.201199","DOI":"10.1098\/rsos.201199"},{"key":"10_CR18","doi-asserted-by":"crossref","unstructured":"Sabir, E., AbdAlmageed, W., Wu, Y., Natarajan, P.: Deep multimodal image-repurposing detection. In: Proceedings of the 26th ACM international conference on Multimedia, pp. 1337\u20131345 (2018)","DOI":"10.1145\/3240508.3240707"},{"key":"10_CR19","unstructured":"Schick, N.: Don\u2019t underestimate the cheapfake (2020). https:\/\/www.technologyreview.com\/2020\/12\/22\/1015442\/cheapfakes-more-political-damage-2020-election-than-deepfakes\/"},{"key":"10_CR20","doi-asserted-by":"publisher","unstructured":"Tandoc Jr., E.C.: The facts of fake news: a research review. Soc. Compass 13(9), e12724 (2019). https:\/\/doi.org\/10.1111\/soc4.12724","DOI":"10.1111\/soc4.12724"},{"key":"10_CR21","doi-asserted-by":"publisher","unstructured":"Tran, Q.T., Nguyen, T.P., Dao, M., La, T.V., Tran, A.D., Dang Nguyen, D.T.: A textual-visual-entailment-based unsupervised algorithm for cheapfake detection, August 2022. https:\/\/doi.org\/10.1145\/3503161.3551596","DOI":"10.1145\/3503161.3551596"},{"key":"10_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1007\/978-3-030-98355-0_27","volume-title":"MultiMedia Modeling","author":"NH Vo","year":"2022","unstructured":"Vo, N.H., Phan, K.D., Tran, A.-D., Dang-Nguyen, D.-T.: Adversarial attacks on\u00a0deepfake detectors: a practical analysis. In: \u00de\u00f3r J\u00f3nsson, B., et al. (eds.) MMM 2022. LNCS, vol. 13142, pp. 318\u2013330. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-030-98355-0_27"},{"key":"10_CR23","doi-asserted-by":"publisher","unstructured":"Vosoughi, S., Roy, D., Aral, S.: The spread of true and false news online. Science 359(6380), 1146\u20131151 (2018). https:\/\/doi.org\/10.1126\/science.aap9559","DOI":"10.1126\/science.aap9559"},{"key":"10_CR24","doi-asserted-by":"publisher","first-page":"2146","DOI":"10.1109\/TASLP.2020.3008390","volume":"28","author":"B Wang","year":"2020","unstructured":"Wang, B., Kuo, C.C.J.: Sbert-wk: a sentence embedding method by dissecting bert-based word models. IEEE\/ACM Trans. Audio Speech Lang. Process. 28, 2146\u20132157 (2020)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"issue":"6","key":"10_CR25","doi-asserted-by":"publisher","first-page":"1452","DOI":"10.1109\/TPAMI.2017.2723009","volume":"40","author":"B Zhou","year":"2017","unstructured":"Zhou, B., Lapedriza, A., Khosla, A., Oliva, A., Torralba, A.: Places: a 10 million image database for scene recognition. IEEE Trans. Pattern Anal. Mach. Intell. 40(6), 1452\u20131464 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-53302-0_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T11:57:16Z","timestamp":1709812636000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-53302-0_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031533013","9783031533020"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-53302-0_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"29 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Amsterdam","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"The Netherlands","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 February 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"112","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}