{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,22]],"date-time":"2026-03-22T06:48:41Z","timestamp":1774162121536,"version":"3.50.1"},"reference-count":62,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100003556","name":"Ministry of Foreign Affairs","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003556","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/icpr56361.2022.9955644","type":"proceedings-article","created":{"date-parts":[[2022,11,29]],"date-time":"2022-11-29T19:34:13Z","timestamp":1669750453000},"page":"4087-4094","source":"Crossref","is-referenced-by-count":32,"title":["CaMEL: Mean Teacher Learning for Image Captioning"],"prefix":"10.1109","author":[{"given":"Manuele","family":"Barraco","sequence":"first","affiliation":[{"name":"University of Modena and Reggio Emilia"}]},{"given":"Matteo","family":"Stefanini","sequence":"additional","affiliation":[{"name":"University of Modena and Reggio Emilia"}]},{"given":"Marcella","family":"Cornia","sequence":"additional","affiliation":[{"name":"University of Modena and Reggio Emilia"}]},{"given":"Silvia","family":"Cascianelli","sequence":"additional","affiliation":[{"name":"University of Modena and Reggio Emilia"}]},{"given":"Lorenzo","family":"Baraldi","sequence":"additional","affiliation":[{"name":"University of Modena and Reggio 
Emilia"}]},{"given":"Rita","family":"Cucchiara","sequence":"additional","affiliation":[{"name":"University of Modena and Reggio Emilia"}]}],"member":"263","reference":[{"key":"ref39","article-title":"Training data-efficient image transformers & distillation through attention","author":"touvron","year":"2021","journal-title":"Proceedings of the International Conference on Machine Learning"},{"key":"ref38","article-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","author":"dosovitskiy","year":"2021","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00754"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.503"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.345"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"ref37","article-title":"CPTR: Full Transformer Network for Image Captioning","author":"liu","year":"2021","journal-title":"arXiv preprint arXiv 2101 10955"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.664"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01094"},{"key":"ref34","article-title":"Exploring Visual Relationship for Image Captioning","author":"yao","year":"2018","journal-title":"Proceedings of the European Conference on Computer Vision"},{"key":"ref60","article-title":"Adam: A Method for Stochastic Optimization","author":"kingma","year":"2015","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1002\/nav.3800020109"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01070"},{"key":"ref27","article-title":"Large scale distributed neural network training through online 
distillation","author":"anil","year":"2018","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref29","article-title":"Microsoft COCO: Common Objects in Context","author":"lin","year":"2014","journal-title":"Proceedings of the European Conference on Computer Vision"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3148210"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"ref22","article-title":"Scaling Up Vision-Language Pre-training for Image Captioning","author":"hu","year":"2022","journal-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition"},{"key":"ref21","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","author":"wang","year":"2022","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref24","article-title":"Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results","author":"tarvainen","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.100"},{"key":"ref26","article-title":"Distilling the knowledge in a neural network","author":"hinton","year":"2015","journal-title":"Advances in Neural Information Processing Systems Workshops"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196653"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7005"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1162"},{"key":"ref58","article-title":"SPICE: Semantic Propositional Image Caption 
Evaluation","author":"anderson","year":"2016","journal-title":"Proceedings of the European Conference on Computer Vision"},{"key":"ref57","article-title":"Rouge: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"Proceedings annual meeting of the Association for Computational Linguistics"},{"key":"ref56","article-title":"METEOR: An automatic metric for MT evaluation with improved correlation with human judgments","author":"banerjee","year":"2005","journal-title":"Proceedings annual meeting of the Association for Computational Linguistics"},{"key":"ref55","article-title":"BLEU: a method for automatic evaluation of machine translation","author":"papineni","year":"2002","journal-title":"Proceedings of the Annual Meeting of the Association for Computational Linguistics"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2577031"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref52","article-title":"Teacher-Critical Training Strategies for Image Captioning","author":"huang","year":"2020","journal-title":"arXiv preprint arXiv 2009 14406"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.08.030"},{"key":"ref11","article-title":"Image Captioning: Transforming Objects into Words","author":"herdade","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-06430-2_24"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01098"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01059"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i3.16328"},{"key":"ref15","first-page":"1","article-title":"Explaining transformer-based image captioning models: An empirical analysis","author":"cornia","year":"2021","journal-title":"AI communications"},{"key":"ref16","article-title":"How Much Can 
CLIP Benefit Vision-and-Language Tasks?","author":"shen","year":"2021","journal-title":"arXiv preprint arXiv 2107 06383"},{"key":"ref17","article-title":"Universal Captioner: Inducing Content-Style Separation in Vision-and-Language Model Training","author":"cornia","year":"2022","journal-title":"arXiv preprint arXiv 2111 12372"},{"key":"ref18","article-title":"Learning Transferable Visual Models From Natural Language Supervision","author":"radford","year":"2021","journal-title":"arXiv preprint arXiv 2103 05767"},{"key":"ref19","article-title":"Oscar: Object-semantics aligned pre-training for vision-language tasks","author":"li","year":"2020","journal-title":"Proceedings of the European Conference on Computer Vision"},{"key":"ref4","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"2015","journal-title":"Proceedings of the International Conference on Machine Learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2587640"},{"key":"ref8","article-title":"Prophet Attention: Predicting Attention with Future Attention","author":"liu","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00473"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01034"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref46","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01521"},{"key":"ref48","article-title":"Augmenting Self-attention with Persistent 
Memory","author":"sukhbaatar","year":"2019","journal-title":"arXiv preprint arXiv 1907 01403"},{"key":"ref47","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"Proceedings of the Annual Conference of the North American Chapter of the Association for Computational Linguistics"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00435"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00583"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3460426.3463587"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00902"}],"event":{"name":"2022 26th International Conference on Pattern Recognition (ICPR)","location":"Montreal, QC, Canada","start":{"date-parts":[[2022,8,21]]},"end":{"date-parts":[[2022,8,25]]}},"container-title":["2022 26th International Conference on Pattern Recognition (ICPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9956007\/9955631\/09955644.pdf?arnumber=9955644","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,19]],"date-time":"2022-12-19T20:05:00Z","timestamp":1671480300000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9955644\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":62,"URL":"https:\/\/doi.org\/10.1109\/icpr56361.2022.9955644","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}