{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,8]],"date-time":"2026-05-08T09:58:46Z","timestamp":1778234326382,"version":"3.51.4"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/icme52920.2022.9859720","type":"proceedings-article","created":{"date-parts":[[2022,8,26]],"date-time":"2022-08-26T15:45:18Z","timestamp":1661528718000},"page":"1-6","source":"Crossref","is-referenced-by-count":213,"title":["CAT: Cross Attention in Vision Transformer"],"prefix":"10.1109","author":[{"given":"Hezheng","family":"Lin","sequence":"first","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]},{"given":"Xing","family":"Cheng","sequence":"additional","affiliation":[{"name":"University of Chinese Academy of Sciences"}]},{"given":"Xiangyu","family":"Wu","sequence":"additional","affiliation":[{"name":"KuaiShou Inc."}]},{"given":"Dong","family":"Shen","sequence":"additional","affiliation":[{"name":"KuaiShou Inc."}]}],"member":"263","reference":[{"key":"ref39","first-page":"1929","article-title":"Dropout: a simple way to prevent neural networks from overfitting","volume":"15","author":"srivastava","year":"2014","journal-title":"The Journal of Machine Learning Research"},{"key":"ref38","author":"contributors","year":"2020","journal-title":"MMSegmentation Openmmlab semantic segmentation toolbox and benchmark"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00326"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.29007\/3b2l"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00644"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00656"},{"key":"ref36","first-page":"418","article-title":"Unified perceptual parsing for scene understanding","author":"xiao","year":"0","journal-title":"Proceedings of the European Conference on Computer Vision (ECCV)"},{"key":"ref35","first-page":"801","article-title":"Encoder-decoder with atrous separable convolution for semantic image segmentation","author":"chen","year":"0","journal-title":"Proceedings of the European Conference on Computer Vision (ECCV)"},{"key":"ref34","article-title":"Object-contextual representations for semantic segmentation","author":"yuan","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00062"},{"key":"ref12","article-title":"Transformer in transformer","author":"han","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref13","article-title":"Layer normalization","author":"ba","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00584"},{"key":"ref17","article-title":"Multi-scale context aggregation by dilated convolutions","author":"yu","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.195"},{"key":"ref19","article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","author":"howard","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00041"},{"key":"ref27","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","author":"tan","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref6","first-page":"1691","article-title":"Generative pretraining from pixels","author":"chen","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref8","article-title":"Training data-efficient image transformers & distillation through attention","author":"touvron","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref7","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref2","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref9","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref1","first-page":"1995","article-title":"Convolutional networks for images, speech, and time series","volume":"3361","author":"lecun","year":"1995","journal-title":"The Handbook of Brain Theory and Neural Networks"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00378"},{"key":"ref22","article-title":"Yolov5","year":"2021","journal-title":"Ultralytics"},{"key":"ref21","first-page":"642","article-title":"Unilmv2: Pseudo-masked language models for unified language model pre-training","author":"bao","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref24","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"0","journal-title":"European Conference on Computer Vision"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.544"}],"event":{"name":"2022 IEEE International Conference on Multimedia and Expo (ICME)","location":"Taipei, Taiwan","start":{"date-parts":[[2022,7,18]]},"end":{"date-parts":[[2022,7,22]]}},"container-title":["2022 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9859562\/9858923\/09859720.pdf?arnumber=9859720","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,19]],"date-time":"2022-09-19T16:24:36Z","timestamp":1663604676000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9859720\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/icme52920.2022.9859720","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}