{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,30]],"date-time":"2026-04-30T16:45:58Z","timestamp":1777567558750,"version":"3.51.4"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/icme52920.2022.9860016","type":"proceedings-article","created":{"date-parts":[[2022,8,26]],"date-time":"2022-08-26T19:45:18Z","timestamp":1661543118000},"page":"1-6","source":"Crossref","is-referenced-by-count":35,"title":["MLTR: Multi-Label Classification with Transformer"],"prefix":"10.1109","author":[{"given":"Xing","family":"Cheng","sequence":"first","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Hezheng","family":"Lin","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Xiangyu","family":"Wu","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Dong","family":"Shen","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Fan","family":"Yang","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Honglin","family":"Liu","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]},{"given":"Nian","family":"Shi","sequence":"additional","affiliation":[{"name":"MMU KuaiShou Inc.,Beijing,China"}]}],"member":"263","reference":[{"key":"ref32","article-title":"Visualizing data using t-sne","volume":"9","author":"van der maaten","year":"2008","journal-title":"J Machine Learning Research"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.219"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12281"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2812605"},{"key":"ref11","first-page":"649","article-title":"Attention-driven dynamic graph convolutional network for multi-label image recognition","author":"ye","year":"0","journal-title":"European Conference on Computer Vision"},{"key":"ref12","author":"han","year":"2021","journal-title":"Transformer in transformer"},{"key":"ref13","author":"wu","year":"2021","journal-title":"CvT introducing convolutions to vision transformers"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6964"},{"key":"ref15","author":"yuan","year":"2021","journal-title":"Tokens-to-Token ViT Training Vision Transformers from Scratch on ImageNet [J]"},{"key":"ref16","author":"zhu","year":"2020","journal-title":"Deformable detr Deformable transformers for end-to-end object detection"},{"key":"ref17","first-page":"213","article-title":"End-to-end object detection with transformers","author":"carion","year":"0","journal-title":"European Conference on Computer Vision"},{"key":"ref18","author":"bertasius","year":"2021","journal-title":"Is space-time attention all you need for video understanding?"},{"key":"ref19","first-page":"111","article-title":"A theoretical analysis of feature pooling in visual recognition","author":"boureau","year":"0","journal-title":"Proceedings of the 27th International Conference on Machine Learning (ICML-10)"},{"key":"ref28","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume":"139","author":"touvron","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref4","author":"dosovitskiy","year":"2020","journal-title":"An image is worth 16x16 words Transformers for image recognition at scale"},{"key":"ref27","author":"devries","year":"2017","journal-title":"Improved regularization of convolutional neural networks with cutout"},{"key":"ref3","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"Advances in neural information processing systems"},{"key":"ref6","author":"liu","year":"2021","journal-title":"Swin transformer Hierarchical vision transformer using shifted windows"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3025814"},{"key":"ref5","author":"touvron","year":"2020","journal-title":"Training data-efficient image transformers & distillation through attention"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00532"},{"key":"ref7","author":"heo","year":"2021","journal-title":"Rethinking spatial dimensions of vision transformers"},{"key":"ref2","first-page":"770","article-title":"Deep residual learning for image recognition","author":"he","year":"0","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00061"},{"key":"ref1","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"0","journal-title":"European Conference on Computer Vision"},{"key":"ref20","author":"ben-baruch","year":"2020","journal-title":"Asymmetric loss for multi-label classification"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0275-4"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref24","author":"loshchilov","year":"2017","journal-title":"Decoupled weight decay regularization"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/1646396.1646452"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00144"},{"key":"ref25","author":"smith","year":"2018","journal-title":"A disciplined approach to neural network hyper-parameters Part I-learning rate batch size momentum and weight decay"}],"event":{"name":"2022 IEEE International Conference on Multimedia and Expo (ICME)","location":"Taipei, Taiwan","start":{"date-parts":[[2022,7,18]]},"end":{"date-parts":[[2022,7,22]]}},"container-title":["2022 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9859562\/9858923\/09860016.pdf?arnumber=9860016","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,19]],"date-time":"2022-09-19T20:25:05Z","timestamp":1663619105000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9860016\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/icme52920.2022.9860016","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}