{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,2]],"date-time":"2025-12-02T03:34:30Z","timestamp":1764646470915,"version":"3.28.0"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,18]],"date-time":"2023-06-18T00:00:00Z","timestamp":1687046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,18]],"date-time":"2023-06-18T00:00:00Z","timestamp":1687046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,18]]},"DOI":"10.1109\/ijcnn54540.2023.10191980","type":"proceedings-article","created":{"date-parts":[[2023,8,2]],"date-time":"2023-08-02T17:30:03Z","timestamp":1690997403000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Anime Character Identification and Tag Prediction by Multimodality Modeling: Dataset and Model"],"prefix":"10.1109","author":[{"given":"Fan","family":"Yi","sequence":"first","affiliation":[{"name":"School of Computer Science, Fudan University,Shanghai,China"}]},{"given":"Jiaxiang","family":"Wu","sequence":"additional","affiliation":[{"name":"Youtu Lab Tencent,Shanghai,China"}]},{"given":"Minyi","family":"Zhao","sequence":"additional","affiliation":[{"name":"School of Computer Science, Fudan University,Shanghai,China"}]},{"given":"Shuigeng","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Computer Science, Fudan University,Shanghai,China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-86331-9_27"},{"journal-title":"Vlmo Unified vision-language pre-training with mixture-of-modality-experts","year":"2021","author":"bao","key":"ref35"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2017.178"},{"key":"ref34","first-page":"12 888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"li","year":"2022","journal-title":"ICML"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.jvcir.2013.01.007"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.124"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS48785.2022.9937519"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00804"},{"key":"ref31","article-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume":"32","author":"lu","year":"2019","journal-title":"NeurIPS"},{"key":"ref30","first-page":"5583","article-title":"Vilt: Vision-and-language transformer without convolution or region supervision","author":"kim","year":"2021","journal-title":"ICML"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3321408.3322624"},{"key":"ref33","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume":"34","author":"li","year":"2021","journal-title":"NeurIPS"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2017.291"},{"key":"ref32","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"2021","journal-title":"ICML"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref1","article-title":"Attention is all you need","volume":"30","author":"vaswani","year":"2017","journal-title":"NeurIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3011549.3011551"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3069908"},{"journal-title":"Webcaricature a benchmark for caricature recognition","year":"2017","author":"huo","key":"ref16"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19775-8_33"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413726"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D15-1303"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2014.2372011"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2537340"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"115","DOI":"10.3390\/s16010115","article-title":"Deep convolutional and lstm recurrent neural networks for multimodal wearable activity recognition","volume":"16","author":"ord\u00f3\u00f1ez","year":"2016","journal-title":"SENSORS"},{"journal-title":"Daf Re A challenging crowd-sourced large-scale long-tailed dataset for anime character recognition","year":"2021","author":"rios","key":"ref20"},{"key":"ref22","article-title":"Deep fragment embeddings for bidirectional image sentence mapping","volume":"27","author":"karpathy","year":"2014","journal-title":"NeurIPS"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21236\/ADA623249"},{"journal-title":"BERT Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"devlin","key":"ref28"},{"journal-title":"An image is worth 16&#x00D7;16 words Transformers for image recognition at scale","year":"2020","author":"dosovitskiy","key":"ref27"},{"journal-title":"UNIMO Towards Unified-Modal Understanding and Generation via Cross-Modal Contrastive Learning","year":"2020","author":"li","key":"ref29"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"1413","DOI":"10.1109\/TSMCB.2012.2192108","article-title":"On combining multiple features for cartoon character retrieval and clip synthesis","volume":"42","author":"yu","year":"2012","journal-title":"IEEE Transactions on Systems Man and Cybernetics Part B Cybernetics"},{"journal-title":"Laion-400m Open dataset of clip-filtered 400 million image-text pairs","year":"2021","author":"schuhmann","key":"ref7"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-016-4020-z"},{"key":"ref4","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"2014","journal-title":"ECCV"},{"key":"ref3","article-title":"Im2text: Describing images using 1 million captioned photographs","volume":"24","author":"ordonez","year":"2011","journal-title":"NeurIPS"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"journal-title":"Danbooru2017 A large-scale crowdsourced and tagged anime illustration dataset","year":"0","author":"anonymous","key":"ref40"}],"event":{"name":"2023 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2023,6,18]]},"location":"Gold Coast, Australia","end":{"date-parts":[[2023,6,23]]}},"container-title":["2023 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10190990\/10190992\/10191980.pdf?arnumber=10191980","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,21]],"date-time":"2023-08-21T17:46:42Z","timestamp":1692640002000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10191980\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,18]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/ijcnn54540.2023.10191980","relation":{},"subject":[],"published":{"date-parts":[[2023,6,18]]}}}