{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,21]],"date-time":"2025-12-21T08:33:27Z","timestamp":1766306007010,"version":"3.48.0"},"reference-count":24,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2025,10,2]],"date-time":"2025-10-02T00:00:00Z","timestamp":1759363200000},"content-version":"vor","delay-in-days":274,"URL":"http:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100002241","name":"JST","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002241","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004721","name":"University of Tokyo","doi-asserted-by":"publisher","award":["C2410"],"award-info":[{"award-number":["C2410"]}],"id":[{"id":"10.13039\/501100004721","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100020354","name":"The University of Tokyo Graduate School of Frontier Sciences","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100020354","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001695","name":"Japan Science and Technology Corporation","doi-asserted-by":"publisher","award":["JPMJBS2418"],"award-info":[{"award-number":["JPMJBS2418"]}],"id":[{"id":"10.13039\/501100001695","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Procedia Computer Science"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1016\/j.procs.2025.10.006","type":"journal-article","created":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T22:15:25Z","timestamp":1762467325000},"page":"5389-5398","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Comparing User Perceptions of AI-Generated Short Video Clips and Text Summaries of Academic Papers: Toward the Development of the \u201cPaper 2 Clip\u201d System"],"prefix":"10.1016","volume":"270","author":[{"given":"Hayato","family":"Sezaki","sequence":"first","affiliation":[]},{"given":"Takashi","family":"Goto","sequence":"additional","affiliation":[]},{"given":"Ayako Kurono","family":"(Fukunaga)","sequence":"additional","affiliation":[]},{"given":"Hideo","family":"Kawamata","sequence":"additional","affiliation":[]},{"given":"Kayoko","family":"Kurita","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"1","key":"10.1016\/j.procs.2025.10.006_bib1","doi-asserted-by":"crossref","first-page":"5","DOI":"10.1108\/00012530910932267","article-title":"\u201cElectronic journals and changes in scholarly article seeking and reading patterns.\u201d","volume":"61","author":"Tenopir","year":"2009","journal-title":"Aslib Proceedings"},{"key":"10.1016\/j.procs.2025.10.006_bib2","article-title":"\u201cActual usage of short videos among Generation Z: Survey report.\"","author":"SOMEWRITE Inc","year":"2021","journal-title":"SOMEWRITE White Paper"},{"key":"10.1016\/j.procs.2025.10.006_bib3","unstructured":"Elicit Research PBC (n.d.) \u201cElicit.\u201d https:\/\/elicit.com\/ (accessed\u202fMay 31,\u202f2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib4","unstructured":"NextLab Inc. (n.d.) \u201cReadable.\u201d https:\/\/about.readable.jp\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib5","unstructured":"Daichi Konno (n.d.) \u201cPaper Interpreter (Japanese) in ChatGPT GPTs.\u201d https:\/\/chatgpt.com\/g\/g-hxDOCBQrs-paper-interpreter-japanese (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib6","doi-asserted-by":"crossref","unstructured":"Yuchi Yahagi, Rintaro Chujo, Yuga Harada, Changyo Han, Kohei Sugiyama, and Takeshi Naemura (2025) \u201cPaperWave: Listening to research papers as conversational podcasts scripted by LLM.\u201d Proceedings of the Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, 1\u201310. https:\/\/doi.org\/10.1145\/3706599.3706664","DOI":"10.1145\/3706599.3706664"},{"key":"10.1016\/j.procs.2025.10.006_bib7","doi-asserted-by":"crossref","unstructured":"Yuchi Yahagi, Rintaro Chujo, Yuga Harada, Changyo Han, Kohei Sugiyama, and Takeshi Naemura (2024) \u201cDeveloping PaperWave: A System for Adapting Research Papers into Conversational Podcasts with LLMs.\u201d IEICE Technical Report, HCG Symposium 2024, \u202fC-1-6.","DOI":"10.1145\/3706599.3706664"},{"key":"10.1016\/j.procs.2025.10.006_bib8","unstructured":"Hayato Sezaki, Takashi Goto, Ayako Kurono, Hideo Kawamata, Mayumi Sakurai, Siyuan Zhang, and Junji Isemoto. (2023) \u201cPaper\u202f2\u202fClip.\u201d Ministry of Education, Culture, Sports, Science and Technology (MEXT) \u00d7 Generative AI 2023 Ideathon \u2014 \u201cGenerative AI and the Digitalization of Higher Education\u201d Powered by Student-centered higher education ecosystem through Digitalization (Scheem-D)."},{"key":"10.1016\/j.procs.2025.10.006_bib9","unstructured":"Hayato Sezaki, Takashi Goto, Ayako Kurono, Hideo Kawamata, Mayumi Sakurai, and Siyuan Zhang. (2024) \u201cPaper 2 Clip\u2013Building a Platform to Foster Interdisciplinary Exchange by Converting Academic Papers into Short Video Clips.\u201d Ministry of Education, Culture, Sports, Science and Technology (MEXT), Student-centered higher education ecosystem through Digitalization (Scheem-D), Scheem-D Pitch and Conference 2023."},{"key":"10.1016\/j.procs.2025.10.006_bib10","unstructured":"Hayato Sezaki, Takashi Goto, Ayako Kurono (Fukunaga), and Hideo Kawamata. (2024) \u201c\u201cPaper 2 Clip\u201d\u2013A Generative AI System for Converting Academic Papers to Short Video Clips \u2013.\u201d The Conference of Digital Life vol.\u202f2, Digital INSPIRE, DI-1."},{"key":"10.1016\/j.procs.2025.10.006_bib11","unstructured":"Sankei Digital Inc. (n.d.) \u201cJournal of Digital Life.\u201d https:\/\/journal-digitallife.com\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib12","unstructured":"invideo Inc. (n.d.) \u201cinvideo\u202fAI.\u201d https:\/\/invideo.io\/ (accessed\u202fMay 31,\u202f2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib13","unstructured":"Mavericks Inc. (n.d.) \u201cNoLang.\u201d https:\/\/no-lang.com\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib14","unstructured":"OpenAI Inc. (n.d.) \u201cSora.\u201d https:\/\/sora.com\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib15","unstructured":"OpenAI Inc. (n.d.) \u201cChatGPT.\u201d https:\/\/chatgpt.com\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib16","unstructured":"Kazuyuki Hiroshiba (n.d.) \u201cVOICEVOX.\u201d https:\/\/voicevox.hiroshiba.jp\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib17","unstructured":"Microsoft Corp. (n.d.) \u201cClipchamp.\u201d https:\/\/clipchamp.com\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib18","unstructured":"VoyagerX Inc. (n.d.) \u201cVrew.\u201d https:\/\/vrew.ai\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib19","unstructured":"Ryousuke Furukado, Goichi Hagiwara, and Hiroyuki Inagaki. (2022) \u201cEffects of glucose Ramune candy ingestion on concentration during esports play and cognitive function.\u201d Journal of Digital Life, 2, 2022.2.11. https:\/\/doi.org\/10.51015\/jdl.2022.2.11"},{"key":"10.1016\/j.procs.2025.10.006_bib20","unstructured":"Yoshimasa Umehara, Wenyuan Jiang, Yoshito Nishita, Yuhei Yamamoto, Takeshi Naruo, Shigenori Tanaka, Akira Yokomichi, Norio Fujimoto, Toshihiro Akagi, and Shingo Hakamata (2024) \u201cDevelopment of Video Switching System in Sport Fields.\u201d Journal of Digital Life, 4(Special Issue), 2024.4.S6. https:\/\/doi.org\/10.51015\/jdl.2024.4.S6"},{"key":"10.1016\/j.procs.2025.10.006_bib21","unstructured":"Toshihiro Tsuchihashi (2024) \u201cHow much do you bid? Answers from ChatGPT in first-price and second-price auctions.\u201d Journal of Digital Life, 3, 2023.3.11. https:\/\/doi.org\/10.51015\/jdl.2023.3.11"},{"key":"10.1016\/j.procs.2025.10.006_bib22","unstructured":"Ryuichi Imai, Daisuke Kamiya, Yuhei Yamamoto, Wenyuan Jiang, Masaya Nakahara, Koki Nakahata, and Shigenori Tanaka (2023) \u201cMeasurement of Motor-vehicle Traffic Volume Using Camera Images and Artificial Intelligence.\u201d Journal of Digital Life, 3, 2023.3.4. https:\/\/doi.org\/10.51015\/jdl.2023.3.4"},{"key":"10.1016\/j.procs.2025.10.006_bib23","unstructured":"Cross Marketing Inc. (n.d.) \u201cQiQUMO.\u201d https:\/\/qiqumo.jp\/ (accessed\u202fMay\u202f31, 2025)."},{"key":"10.1016\/j.procs.2025.10.006_bib24","unstructured":"Google DeepMind (n.d.) \u201cVeo.\u201d https:\/\/deepmind.google\/models\/veo\/ (accessed\u202fMay\u202f31, 2025)."}],"container-title":["Procedia Computer Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1877050925033393?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1877050925033393?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,12,21]],"date-time":"2025-12-21T08:29:24Z","timestamp":1766305764000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1877050925033393"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":24,"alternative-id":["S1877050925033393"],"URL":"https:\/\/doi.org\/10.1016\/j.procs.2025.10.006","relation":{},"ISSN":["1877-0509"],"issn-type":[{"type":"print","value":"1877-0509"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Comparing User Perceptions of AI-Generated Short Video Clips and Text Summaries of Academic Papers: Toward the Development of the \u201cPaper 2 Clip\u201d System","name":"articletitle","label":"Article Title"},{"value":"Procedia Computer Science","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.procs.2025.10.006","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 The Author(s). Published by Elsevier B.V.","name":"copyright","label":"Copyright"}]}}