{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T13:11:11Z","timestamp":1774703471255,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":26,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819983872","type":"print"},{"value":"9789819983889","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,11,27]],"date-time":"2023-11-27T00:00:00Z","timestamp":1701043200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,27]],"date-time":"2023-11-27T00:00:00Z","timestamp":1701043200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8388-9_3","type":"book-chapter","created":{"date-parts":[[2023,11,26]],"date-time":"2023-11-26T16:02:21Z","timestamp":1701014541000},"page":"28-41","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["No Token Left Behind: Efficient Vision Transformer via\u00a0Dynamic Token Idling"],"prefix":"10.1007","author":[{"given":"Xuwei","family":"Xu","sequence":"first","affiliation":[]},{"given":"Changlin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yudong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Xiaojun","family":"Chang","sequence":"additional","affiliation":[]},{"given":"Jiajun","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Sen","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,27]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Chen, M., Peng, H., Fu, J., Ling, H.: AutoFormer: searching transformers for visual recognition. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01205"},{"key":"3_CR2","doi-asserted-by":"crossref","unstructured":"Chen, Y., Dai, X., Chen, D., Liu, M., Dong, X., Yuan, L., Liu, Z.: Mobile-former: bridging MobileNet and transformer. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"3_CR3","unstructured":"Chen, Z., Duan, Y., Wang, W., He, J., Lu, T., Dai, J., Qiao, Y.: Vision transformer adapter for dense predictions. In: ICLR (2023)"},{"key":"3_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Z., Xie, L., Niu, J., Liu, X., Wei, L., Tian, Q.: Visformer: the vision-friendly transformer. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00063"},{"key":"3_CR5","unstructured":"Dai, Z., Liu, H., Le, Q.V., Tan, M.: CoAtNet: marrying convolution and attention for all data sizes. In: NeurIPS (2021)"},{"key":"3_CR6","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"3_CR7","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"3_CR8","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: EVA: exploring the limits of masked visual representation learning at scale. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"3_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"396","DOI":"10.1007\/978-3-031-20083-0_24","volume-title":"Computer Vision - ECCV 2022","author":"M Fayyaz","year":"2022","unstructured":"Fayyaz, M., et al.: Adaptive token sampling for efficient vision transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13671, pp. 396\u2013414. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20083-0_24"},{"key":"3_CR10","unstructured":"Jiang, Z.H., et al.: All tokens matter: token labeling for training better vision transformers. In: NeurIPS (2021)"},{"key":"3_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"620","DOI":"10.1007\/978-3-031-20083-0_37","volume-title":"Computer Vision - ECCV 2022","author":"Z Kong","year":"2022","unstructured":"Kong, Z., et al.: SPViT: enabling faster vision transformers via latency-aware soft token pruning. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13671, pp. 620\u2013640. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20083-0_37"},{"key":"3_CR12","unstructured":"Liang, Y., Chongjian, G., Tong, Z., Song, Y., Wang, J., Xie, P.: EViT: expediting vision transformers via token reorganizations. In: ICLR (2021)"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer V2: scaling up capacity and resolution. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3_CR15","unstructured":"Mehta, S., Rastegari, M.: MobileViT: light-weight, general-purpose, and mobile-friendly vision transformer. In: ICLR (2022)"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Meng, L., et al.: AdaViT: adaptive vision transformers for efficient image recognition. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01199"},{"key":"3_CR17","unstructured":"Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: DynamicViT: efficient vision transformers with dynamic token sparsification. In: NeurIPS (2021)"},{"key":"3_CR18","doi-asserted-by":"publisher","first-page":"888","DOI":"10.1109\/34.868688","volume":"22","author":"J Shi","year":"2000","unstructured":"Shi, J., Malik, J.: Normalized cuts and image segmentation. TPAMI 22, 888\u2013905 (2000)","journal-title":"TPAMI"},{"key":"3_CR19","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: ICML (2021)"},{"key":"3_CR20","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"3_CR21","unstructured":"Wang, P., Zheng, W., Chen, T., Wang, Z.: Anti-oversmoothing in deep vision transformers via the Fourier domain analysis: from theory to practice. In: ICLR (2022)"},{"key":"3_CR22","unstructured":"Wang, Y., Huang, R., Song, S., Huang, Z., Huang, G.: Not all images are worth 16$$\\times $$16 words: dynamic vision transformers with adaptive sequence length. In: NeurIPS (2021)"},{"key":"3_CR23","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: CvT: introducing convolutions to vision transformers. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"3_CR24","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: Evo-ViT: slow-fast token evolution for dynamic vision transformer. In: AAAI (2022)","DOI":"10.1609\/aaai.v36i3.20202"},{"key":"3_CR25","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-token ViT: training vision transformers from scratch on ImageNet. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"3_CR26","doi-asserted-by":"crossref","unstructured":"Zhai, X., Kolesnikov, A., Houlsby, N., Beyer, L.: Scaling vision transformers. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01179"}],"container-title":["Lecture Notes in Computer Science","AI 2023: Advances in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8388-9_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T18:49:00Z","timestamp":1710355740000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8388-9_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,27]]},"ISBN":["9789819983872","9789819983889"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8388-9_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,11,27]]},"assertion":[{"value":"27 November 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"AI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Australasian Joint Conference on Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Brisbane, QLD","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Australia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 November 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ausai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/ajcai2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review 
Information (provided by the conference organizers)"}},{"value":"213","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"23","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"59","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"11% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
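A record in this shape is what Crossref's public REST API returns for a single work: GET https://api.crossref.org/works/{DOI} yields the {"status": "ok", "message-type": "work", "message": {...}} envelope seen above. A minimal sketch of retrieving and inspecting this record follows, using only the Python standard library; the contact address in the User-Agent header is a placeholder (Crossref asks for one to route you to its "polite" pool, but the request works without it):

```python
import json
import urllib.request

# DOI of the chapter whose Crossref record is shown above.
DOI = "10.1007/978-981-99-8388-9_3"

url = f"https://api.crossref.org/works/{DOI}"
req = urllib.request.Request(
    url,
    # Placeholder contact address; replace with your own.
    headers={"User-Agent": "example-fetcher/0.1 (mailto:you@example.org)"},
)

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

# The work metadata lives under the "message" key of the envelope.
work = record["message"]
print(work["title"][0])                      # chapter title
print(work["DOI"], "-", work["type"])        # DOI and work type ("book-chapter")
print("cited references:", work["references-count"])
for author in work["author"]:
    print(author["given"], author["family"])
```

Fields such as "title" and "container-title" are arrays even when they hold a single value, so indexing with [0] is the usual pattern when reading them.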