{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,24]],"date-time":"2025-10-24T08:29:43Z","timestamp":1761294583332,"version":"3.40.3"},"publisher-location":"Cham","reference-count":56,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031263507"},{"type":"electronic","value":"9783031263514"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-26351-4_28","type":"book-chapter","created":{"date-parts":[[2023,2,25]],"date-time":"2023-02-25T09:03:18Z","timestamp":1677315798000},"page":"459-475","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["RaftMLP: How Much Can Be Done Without Attention and\u00a0with Less Spatial Locality?"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7889-8143","authenticated-orcid":false,"given":"Yuki","family":"Tatsunami","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5375-7862","authenticated-orcid":false,"given":"Masato","family":"Taki","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,26]]},"reference":[{"key":"28_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViViT: a video vision transformer. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"28_CR2","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. In: NeurIPS (2016)"},{"key":"28_CR3","doi-asserted-by":"crossref","unstructured":"Bello, I., Zoph, B., Vaswani, A., Shlens, J., Le, Q.V.: Attention augmented convolutional networks. In: ICCV, pp. 3286\u20133295 (2019)","DOI":"10.1109\/ICCV.2019.00338"},{"key":"28_CR4","doi-asserted-by":"crossref","unstructured":"Chen, C.F., Fan, Q., Panda, R.: CrossViT: cross-attention multi-scale vision transformer for image classification. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00041"},{"key":"28_CR5","unstructured":"Chen, S., Xie, E., Ge, C., Liang, D., Luo, P.: CycleMLP: a MLP-like architecture for dense prediction. arXiv preprint arXiv:2107.10224 (2021)"},{"key":"28_CR6","unstructured":"Cordonnier, J.B., Loukas, A., Jaggi, M.: On the relationship between self-attention and convolutional layers. In: ICLR (2019)"},{"key":"28_CR7","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Shlens, J., Le, Q.V.: RandAugment: practical automated data augmentation with a reduced search space. In: CVPR Workshops, pp. 702\u2013703 (2020)","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"28_CR8","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: CVPR, pp. 
248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"28_CR9","unstructured":"DeVries, T., Taylor, G.W.: Improved regularization of convolutional neural networks with cutout. arXiv preprint arXiv:1708.04552 (2017)"},{"key":"28_CR10","unstructured":"Ding, M., et al.: Cogview: mastering text-to-image generation via transformers. In: NeurIPS (2021)"},{"key":"28_CR11","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"28_CR12","unstructured":"El-Nouby, A., et al.: XCIT: cross-covariance image transformers. arXiv preprint arXiv:2106.09681 (2021)"},{"key":"28_CR13","unstructured":"Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., Wang, Y.: Transformer in transformer. In: NeurIPS (2021)"},{"key":"28_CR14","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"28_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"28_CR16","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (GELUs). arXiv preprint arXiv:1606.08415 (2016)"},{"key":"28_CR17","unstructured":"Hou, Q., Jiang, Z., Yuan, L., Cheng, M.M., Yan, S., Feng, J.: Vision permutator: a permutable MLP-like architecture for visual recognition. arXiv preprint arXiv:2106.12368 (2021)"},{"key":"28_CR18","doi-asserted-by":"crossref","unstructured":"Howard, A., et al.: Searching for MobileNetV3. In: ICCV, pp. 1314\u20131324 (2019)","DOI":"10.1109\/ICCV.2019.00140"},{"key":"28_CR19","unstructured":"Hu, J., Shen, L., Albanie, S., Sun, G., Vedaldi, A.: Gather-excite: exploiting feature context in convolutional neural networks. In: NeurIPS (2018)"},{"key":"28_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"646","DOI":"10.1007\/978-3-319-46493-0_39","volume-title":"Computer Vision \u2013 ECCV 2016","author":"G Huang","year":"2016","unstructured":"Huang, G., Sun, Yu., Liu, Z., Sedra, D., Weinberger, K.Q.: Deep networks with stochastic depth. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 646\u2013661. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_39"},{"key":"28_CR21","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Girshick, R., He, K., Doll\u00e1r, P.: Panoptic feature pyramid networks. In: CVPR, pp. 6399\u20136408 (2019)","DOI":"10.1109\/CVPR.2019.00656"},{"key":"28_CR22","doi-asserted-by":"crossref","unstructured":"Krause, J., Stark, M., Deng, J., Fei-Fei, L.: 3D object representations for fine-grained categorization. In: ICCV Workshops, pp. 554\u2013561 (2013)","DOI":"10.1109\/ICCVW.2013.77"},{"key":"28_CR23","unstructured":"Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images. Technical report, University of Toronto (2009)"},{"key":"28_CR24","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: NeurIPS, vol. 25, pp. 1097\u20131105 (2012)"},{"key":"28_CR25","unstructured":"Li, J., Hassani, A., Walton, S., Shi, H.: ConvMLP: hierarchical convolutional MLPs for vision. 
arXiv preprint arXiv:2109.04454 (2021)"},{"key":"28_CR26","unstructured":"Lian, D., Yu, Z., Sun, X., Gao, S.: AS-MLP: an axial shifted MLP architecture for vision. arXiv preprint arXiv:2107.08391 (2021)"},{"key":"28_CR27","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: ICCV, pp. 2980\u20132988 (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"28_CR28","unstructured":"Liu, H., Dai, Z., So, D.R., Le, Q.V.: Pay attention to MLPs. arXiv preprint arXiv:2105.08050 (2021)"},{"key":"28_CR29","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"28_CR30","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"28_CR31","unstructured":"Melas-Kyriazi, L.: Do you even need attention? A stack of feed-forward layers does surprisingly well on imagenet. arXiv preprint arXiv:2105.02723 (2021)"},{"key":"28_CR32","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: ICVGIP, pp. 722\u2013729 (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"28_CR33","unstructured":"Ramachandran, P., Parmar, N., Vaswani, A., Bello, I., Levskaya, A., Shlens, J.: Stand-alone self-attention in vision models. In: NeurIPS (2019)"},{"key":"28_CR34","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: ICLR (2015)"},{"key":"28_CR35","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: CVPR, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"28_CR36","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: CVPR, pp. 2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"28_CR37","unstructured":"Tan, M., Le, Q.: EfficientNet: rethinking model scaling for convolutional neural networks. In: ICML, pp. 6105\u20136114 (2019)"},{"key":"28_CR38","unstructured":"Tan, M., Le, Q.V.: EfficientNetV2: smaller models and faster training. In: ICML (2021)"},{"key":"28_CR39","doi-asserted-by":"crossref","unstructured":"Tang, C., Zhao, Y., Wang, G., Luo, C., Xie, W., Zeng, W.: Sparse MLP for image recognition: is self-attention really necessary? arXiv preprint arXiv:2109.05422 (2021)","DOI":"10.1609\/aaai.v36i2.20133"},{"key":"28_CR40","unstructured":"Tolstikhin, I., et al.: MLP-mixer: an all-MLP architecture for vision. arXiv preprint arXiv:2105.01601 (2021)"},{"key":"28_CR41","doi-asserted-by":"crossref","unstructured":"Touvron, H., et al.: ResMLP: feedforward networks for image classification with data-efficient training. arXiv preprint arXiv:2105.03404 (2021)","DOI":"10.1109\/TPAMI.2022.3206148"},{"key":"28_CR42","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: ICML (2021)"},{"key":"28_CR43","doi-asserted-by":"crossref","unstructured":"Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., J\u00e9gou, H.: Going deeper with image transformers. In: ICCV, pp. 
32\u201342 (2021)","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"28_CR44","doi-asserted-by":"crossref","unstructured":"Van Horn, G., et al.: The iNaturalist species classification and detection dataset. In: CVPR, pp. 8769\u20138778 (2018)","DOI":"10.1109\/CVPR.2018.00914"},{"key":"28_CR45","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"28_CR46","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"28_CR47","unstructured":"Wang, W., Yao, L., Chen, L., Cai, D., He, X., Liu, W.: CrossFormer: a versatile vision transformer based on cross-scale attention. arXiv preprint arXiv:2108.00154 (2021)"},{"key":"28_CR48","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., He, K.: Non-local neural networks. In: CVPR, pp. 7794\u20137803 (2018)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"28_CR49","unstructured":"Yu, T., Li, X., Cai, Y., Sun, M., Li, P.: Rethinking token-mixing MLP for MLP-based vision backbone. arXiv preprint arXiv:2106.14882 (2021)"},{"key":"28_CR50","doi-asserted-by":"crossref","unstructured":"Yu, T., Li, X., Cai, Y., Sun, M., Li, P.: S$$^2$$-MLP: spatial-shift MLP architecture for vision. arXiv preprint arXiv:2106.07477 (2021)","DOI":"10.1109\/WACV51458.2022.00367"},{"key":"28_CR51","doi-asserted-by":"crossref","unstructured":"Yu, T., Li, X., Cai, Y., Sun, M., Li, P.: S$${}^2$$-mlpv2: improved spatial-shift MLP architecture for vision. arXiv preprint arXiv:2108.01072 (2021)","DOI":"10.1109\/WACV51458.2022.00367"},{"key":"28_CR52","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-Token ViT: training vision transformers from scratch on ImageNet. In: ICCV, pp. 558\u2013567 (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"28_CR53","doi-asserted-by":"crossref","unstructured":"Yun, S., Han, D., Oh, S.J., Chun, S., Choe, J., Yoo, Y.: Cutmix: regularization strategy to train strong classifiers with localizable features. In: ICCV, pp. 6023\u20136032 (2019)","DOI":"10.1109\/ICCV.2019.00612"},{"key":"28_CR54","unstructured":"Zhang, H., Cisse, M., Dauphin, Y.N., Lopez-Paz, D.: Mixup: beyond empirical risk minimization. In: ICLR (2018)"},{"key":"28_CR55","unstructured":"Zhang, Z., Zhang, H., Zhao, L., Chen, T., Pfister, T.: Aggregating nested transformers. arXiv preprint arXiv:2105.12723 (2021)"},{"key":"28_CR56","unstructured":"Zhou, D., et al.: DeepViT: towards deeper vision transformer. 
arXiv preprint arXiv:2103.11886 (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ACCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-26351-4_28","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,25]],"date-time":"2023-02-25T09:14:41Z","timestamp":1677316481000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-26351-4_28"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031263507","9783031263514"],"references-count":56,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-26351-4_28","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"26 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ACCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Asian Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Macao","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 December 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 December 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"accv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.accv2022.org","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT Microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"836","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"277","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"33% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.6","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"For the ACCV 2022 workshops 25 papers have been accepted from 40 submissions","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}