{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:22:02Z","timestamp":1772907722825,"version":"3.50.1"},"reference-count":64,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62122010"],"award-info":[{"award-number":["62122010"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61876177"],"award-info":[{"award-number":["61876177"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/tmm.2022.3233258","type":"journal-article","created":{"date-parts":[[2022,12,30]],"date-time":"2022-12-30T18:52:19Z","timestamp":1672426339000},"page":"8194-8203","source":"Crossref","is-referenced-by-count":4,"title":["Simultaneously Training and Compressing Vision-and-Language Pre-Training Model"],"prefix":"10.1109","volume":"25","author":[{"given":"Qiaosong","family":"Qi","sequence":"first","affiliation":[{"name":"Alibaba Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9863-0091","authenticated-orcid":false,"given":"Aixi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Alibaba Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2671-0655","authenticated-orcid":false,"given":"Yue","family":"Liao","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Beihang University, Beijing, China"}]},{"given":"Wenyu","family":"Sun","sequence":"additional","affiliation":[{"name":"Alibaba Group, Beijing, China"}]},{"given":"Yongliang","family":"Wang","sequence":"additional","affiliation":[{"name":"Alibaba Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8074-0230","authenticated-orcid":false,"given":"Xiaobo","family":"Li","sequence":"additional","affiliation":[{"name":"Alibaba Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9180-2935","authenticated-orcid":false,"given":"Si","family":"Liu","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Beihang University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019","journal-title":"North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol."},{"key":"ref2","article-title":"A robustly optimized bert pretraining approach","author":"Liu","year":"2019"},{"key":"ref3","first-page":"5754","article-title":"Xlnet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Yang","year":"2019"},{"key":"ref4","article-title":"AlBERT: A lite bert for self-supervised learning of language representations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lan","year":"2020"},{"key":"ref5","first-page":"8877","article-title":"Language models are few-shot learners","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Brown Askell","year":"2020"},{"key":"ref6","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1514"},{"key":"ref8","first-page":"13","article-title":"VilBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Lu","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref10","first-page":"1","article-title":"Vl-BERT: Pre-training of generic visual-linguistic representations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Su","year":"2019"},{"key":"ref11","article-title":"Univl: A. unified video and language pre-training model for multimodal understanding and generation","author":"Luo","year":"2020"},{"key":"ref12","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ramesh","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i10.17034"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref15","article-title":"Learning video representations using contrastive bidirectional transformer","author":"Sun","year":"2019"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475431"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00146"},{"key":"ref19","article-title":"DistilBERT, a distilled version of BERT: Smaller, faster, cheaper and lighter","author":"Sanh","year":"2019"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.372"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.195"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.633"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.537"},{"key":"ref24","article-title":"Distilling the knowledge in a neural network","author":"Hinton","year":"2015"},{"key":"ref25","article-title":"Distilling transformers into simple neural networks with unlabeled transfer data","author":"Mukherjee","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1441"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475474"},{"key":"ref28","article-title":"Reducing transformer depth on demand with structured dropout","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Fan","year":"2019"},{"key":"ref29","article-title":"Do deep nets really need to be deep","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Lei","year":"2014"},{"key":"ref30","article-title":"Fitnets: Hints for thin deep nets","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Romero","year":"2015"},{"key":"ref31","article-title":"Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zagoruyko","year":"2016"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.754"},{"key":"ref33","article-title":"Knowledge distillation in generations: More tolerant teachers educate better students","volume-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","author":"Yang","year":"2018"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3073279"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3037502"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2951463"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i1.19945"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475329"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3481034"},{"key":"ref40","first-page":"14014","article-title":"Are sixteen heads really better than one","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Michel","year":"2019"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1580"},{"key":"ref42","first-page":"9782","article-title":"DynaBERT: Dynamic BERT with adaptive width and depth","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Hou","year":"2020"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/iccv48922.2021.00650"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00301"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00381"},{"key":"ref46","article-title":"MSD: Multi-self-distillation learning via multi-classifiers within deep neural networks","author":"Luan","year":"2019"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/155"},{"key":"ref48","article-title":"Pruning convolutional neural networks for resource efficient inference","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Molchanov","year":"2016"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref50","first-page":"5958","article-title":"Train large, then compress: Rethinking model size for efficient training and inference of transformers","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2020"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3053766"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3119868"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3083109"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3090595"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2894964"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2907453"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2903628"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2920620"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.2972830"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2935678"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref63","article-title":"Microsoft coco captions: Data collection and evaluation server","author":"Chen","year":"2015"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6046\/10016790\/10004507.pdf?arnumber=10004507","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T00:22:29Z","timestamp":1705018949000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10004507\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":64,"URL":"https:\/\/doi.org\/10.1109\/tmm.2022.3233258","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}