{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T09:27:13Z","timestamp":1774949233123,"version":"3.50.1"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1007\/s00530-024-01312-0","type":"journal-article","created":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T17:01:30Z","timestamp":1711990890000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":25,"title":["Vision transformer models for mobile\/edge devices: a survey"],"prefix":"10.1007","volume":"30","author":[{"given":"Seung Il","family":"Lee","sequence":"first","affiliation":[]},{"given":"Kwanghyun","family":"Koo","sequence":"additional","affiliation":[]},{"given":"Jong Ho","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Gilha","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Sangbeom","family":"Jeong","sequence":"additional","affiliation":[]},{"given":"Seongjun","family":"O","sequence":"additional","affiliation":[]},{"given":"Hyun","family":"Kim","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,1]]},"reference":[{"key":"1312_CR1","doi-asserted-by":"crossref","unstructured":"Choi, J., Chun, D., Kim, H., Lee, H.-J.: Gaussian yolov3: an accurate and fast object detector using localization uncertainty for autonomous driving. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 502\u2013511 (2019)","DOI":"10.1109\/ICCV.2019.00059"},{"key":"1312_CR2","unstructured":"Howard, A.G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., Adam, H.: MobileNets: efficient convolutional neural networks for mobile vision applications (2017). arXiv preprint. arXiv:1704.04861"},{"key":"1312_CR3","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.-C.: MobileNetV2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"1312_CR4","unstructured":"Tan, M., Le, Q.: EfficientNet: rethinking model scaling for convolutional neural networks. In: International Conference on Machine Learning, pp. 6105\u20136114. PMLR (2019)"},{"key":"1312_CR5","doi-asserted-by":"crossref","unstructured":"Lee, S.I., Kim, H.: GaussianMask: uncertainty-aware instance segmentation based on gaussian modeling. In: 2022 26th International Conference on Pattern Recognition (ICPR), pp. 3851\u20133857. 
IEEE (2022)","DOI":"10.1109\/ICPR56361.2022.9956515"},{"key":"1312_CR6","doi-asserted-by":"publisher","first-page":"157","DOI":"10.1016\/j.aeue.2019.05.023","volume":"107","author":"DK Vishwakarma","year":"2019","unstructured":"Vishwakarma, D.K., Singh, T.: A visual cognizance based multi-resolution descriptor for human action recognition using key pose. AEU Int. J. Electron. Commun. 107, 157\u2013169 (2019)","journal-title":"AEU Int. J. Electron. Commun."},{"key":"1312_CR7","doi-asserted-by":"publisher","first-page":"1107","DOI":"10.1007\/s10462-018-9651-1","volume":"52","author":"T Singh","year":"2019","unstructured":"Singh, T., Vishwakarma, D.K.: Video benchmarks of human action datasets: a review. Artif. Intell. Rev. 52, 1107\u20131154 (2019)","journal-title":"Artif. Intell. Rev."},{"key":"1312_CR8","doi-asserted-by":"publisher","first-page":"469","DOI":"10.1007\/s00521-020-05018-y","volume":"33","author":"T Singh","year":"2021","unstructured":"Singh, T., Vishwakarma, D.K.: A deeply coupled ConvNet for human activity recognition using dynamic and RGB images. Neural Comput. Appl. 33, 469\u2013485 (2021)","journal-title":"Neural Comput. Appl."},{"key":"1312_CR9","doi-asserted-by":"publisher","first-page":"3835","DOI":"10.1109\/TIP.2020.2965299","volume":"29","author":"C Dhiman","year":"2020","unstructured":"Dhiman, C., Vishwakarma, D.K.: View-invariant deep architecture for human action recognition using two-stream motion and shape temporal dynamics. IEEE Trans. Image Process. 29, 3835\u20133844 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"1312_CR10","doi-asserted-by":"crossref","first-page":"52812","DOI":"10.1109\/ACCESS.2023.3294993","volume":"11","author":"D Chun","year":"2023","unstructured":"Chun, D., Choi, J., Lee, H.-J., Kim, H.: CP-CNN: computational parallelization of CNN-based object detectors in heterogeneous embedded systems for autonomous driving. IEEE Access 11, 52812\u201352823 (2023)","journal-title":"IEEE Access"},{"key":"1312_CR11","doi-asserted-by":"publisher","first-page":"120358","DOI":"10.1109\/ACCESS.2021.3108776","volume":"9","author":"J Lee","year":"2021","unstructured":"Lee, J., Jang, J., Lee, J., Chun, D., Kim, H.: CNN-based mask-pose fusion for detecting specific persons on heterogeneous embedded systems. IEEE Access 9, 120358\u2013120366 (2021)","journal-title":"IEEE Access"},{"key":"1312_CR12","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"1312_CR13","unstructured":"Radford, A., Narasimhan, K., Salimans, T., Sutskever, I., et al.: Improving language understanding by generative pre-training. Inpreprint (2018)"},{"key":"1312_CR14","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding (2018). arXiv preprint. arXiv:1810.04805"},{"key":"1312_CR15","doi-asserted-by":"crossref","unstructured":"Sak, H., Senior, A.W., Beaufays, F.: Long short-term memory recurrent neural network architectures for large scale acoustic modeling. Google (2014)","DOI":"10.21437\/Interspeech.2014-80"},{"key":"1312_CR16","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: transformers for image recognition at scale (2020). arXiv preprint. 
arXiv:2010.11929"},{"key":"1312_CR17","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"1312_CR18","doi-asserted-by":"crossref","unstructured":"Yu, F., Huang, K., Wang, M., Cheng, Y., Chu, W., Cui, L.: Width & depth pruning for vision transformers. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 3143\u20133151 (2022)","DOI":"10.1609\/aaai.v36i3.20222"},{"key":"1312_CR19","first-page":"19974","volume":"34","author":"T Chen","year":"2021","unstructured":"Chen, T., Cheng, Y., Gan, Z., Yuan, L., Zhang, L., Wang, Z.: Chasing sparsity in vision transformers: an end-to-end exploration. Adv. Neural Inf. Process. Syst. 34, 19974\u201319988 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR20","first-page":"13937","volume":"34","author":"Y Rao","year":"2021","unstructured":"Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.-J.: DynamicViT: efficient vision transformers with dynamic token sparsification. Adv. Neural Inf. Process. Syst. 34, 13937\u201313949 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR21","unstructured":"Liang, Y., Ge, C., Tong, Z., Song, Y., Wang, J., Xie, P.: Not all patches are what you need: expediting vision transformers via token reorganizations (2022). arXiv preprint. arXiv:2202.07800"},{"key":"1312_CR22","first-page":"28092","volume":"34","author":"Z Liu","year":"2021","unstructured":"Liu, Z., Wang, Y., Han, K., Zhang, W., Ma, S., Gao, W.: Post-training quantization for vision transformer. Adv. Neural Inf. Process. Syst. 34, 28092\u201328103 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR23","first-page":"27168","volume":"35","author":"Z Yao","year":"2022","unstructured":"Yao, Z., Yazdani Aminabadi, R., Zhang, M., Wu, X., Li, C., He, Y.: ZeroQuant: efficient and affordable post-training quantization for large-scale transformers. Adv. Neural Inf. Process. Syst. 35, 27168\u201327183 (2022)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR24","doi-asserted-by":"crossref","unstructured":"Tang, Y., Han, K., Wang, Y., Xu, C., Guo, J., Xu, C., Tao, D.: Patch slimming for efficient vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12165\u201312174 (2022)","DOI":"10.1109\/CVPR52688.2022.01185"},{"issue":"1","key":"1312_CR25","doi-asserted-by":"publisher","first-page":"48","DOI":"10.5573\/IEIESPC.2023.12.1.48","volume":"12","author":"JH Lee","year":"2023","unstructured":"Lee, J.H., Kim, H.: Discrete cosine transformed images are easy to recognize in vision transformers. IEIE Trans. Smart Process. Comput. 12(1), 48\u201354 (2023)","journal-title":"IEIE Trans. Smart Process. Comput."},{"key":"1312_CR26","doi-asserted-by":"crossref","unstructured":"Wu, H., Xiao, B., Codella, N., Liu, M., Dai, X., Yuan, L., Zhang, L.: CvT: introducing convolutions to vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
22\u201331 (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"1312_CR27","doi-asserted-by":"publisher","first-page":"5279","DOI":"10.1109\/TMM.2022.3189496","volume":"25","author":"NJ Kim","year":"2023","unstructured":"Kim, N.J., Kim, H.: FP-AGL: filter pruning with adaptive gradient learning for accelerating deep convolutional neural networks. IEEE Trans. Multimed. 25, 5279\u20135290 (2023)","journal-title":"IEEE Trans. Multimed."},{"key":"1312_CR28","doi-asserted-by":"publisher","first-page":"20828","DOI":"10.1109\/ACCESS.2021.3054879","volume":"9","author":"S Kim","year":"2021","unstructured":"Kim, S., Kim, H.: Zero-centered fixed-point quantization with iterative retraining for deep convolutional neural network-based object detectors. IEEE Access 9, 20828\u201320839 (2021)","journal-title":"IEEE Access"},{"key":"1312_CR29","doi-asserted-by":"crossref","unstructured":"Zhang, X., Zhou, X., Lin, M., Sun, J.: ShuffleNet: an extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6848\u20136856 (2018)","DOI":"10.1109\/CVPR.2018.00716"},{"key":"1312_CR30","unstructured":"Chuanyang, Z., Li, Z., Zhang, K., Yang, Z., Tan, W., Xiao, J., Ren, Y., Pu, S.: SAViT: structure-aware vision transformer pruning via collaborative optimization. In: Oh, A.H., Agarwal, A., Belgrave, D., Cho, K. (eds.) Advances in Neural Information Processing Systems (2022). https:\/\/openreview.net\/forum?id=w5DacXWzQ-Q"},{"key":"1312_CR31","doi-asserted-by":"crossref","unstructured":"Liu, Y., Gehrig, M., Messikommer, N., Cannici, M., Scaramuzza, D.: Revisiting token pruning for object detection and instance segmentation (2023). arXiv preprint. arXiv:2306.07050","DOI":"10.1109\/WACV57701.2024.00264"},{"key":"1312_CR32","doi-asserted-by":"crossref","unstructured":"Wu, K., Zhang, J., Peng, H., Liu, M., Xiao, B., Fu, J., Yuan, L.: TinyViT: fast pretraining distillation for small vision transformers. In: European Conference on Computer Vision, pp. 68\u201385. Springer, Berlin (2022)","DOI":"10.1007\/978-3-031-19803-8_5"},{"key":"1312_CR33","doi-asserted-by":"crossref","unstructured":"Lin, Y., Zhang, T., Sun, P., Li, Z., Zhou, S.: FQ-ViT: post-training quantization for fully quantized vision transformer (2021). arXiv preprint. arXiv:2111.13824","DOI":"10.24963\/ijcai.2022\/164"},{"key":"1312_CR34","doi-asserted-by":"crossref","unstructured":"Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., Shao, L.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"1312_CR35","doi-asserted-by":"crossref","unstructured":"Pan, Z., Zhuang, B., Liu, J., He, H., Cai, J.: Scalable vision transformers with hierarchical pooling. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 377\u2013386 (2021)","DOI":"10.1109\/ICCV48922.2021.00043"},{"key":"1312_CR36","unstructured":"Mehta, S., Rastegari, M.: MobileViT: light-weight, general-purpose, and mobile-friendly vision transformer (2021). arXiv preprint. arXiv:2110.02178"},{"key":"1312_CR37","doi-asserted-by":"crossref","unstructured":"Graham, B., El-Nouby, A., Touvron, H., Stock, P., Joulin, A., J\u00e9gou, H., Douze, M.: LeViT: a vision transformer in convnet\u2019s clothing for faster inference. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 12259\u201312269 (2021)","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"1312_CR38","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1312_CR39","doi-asserted-by":"crossref","unstructured":"Chen, Y., Dai, X., Chen, D., Liu, M., Dong, X., Yuan, L., Liu, Z.: Mobile-former: bridging MobileNet and transformer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5270\u20135279 (2022)","DOI":"10.1109\/CVPR52688.2022.00520"},{"key":"1312_CR40","first-page":"12934","volume":"35","author":"Y Li","year":"2022","unstructured":"Li, Y., Yuan, G., Wen, Y., Hu, J., Evangelidis, G., Tulyakov, S., Wang, Y., Ren, J.: EfficientFormer: vision transformers at MobileNet speed. Adv. Neural Inf. Process. Syst. 35, 12934\u201312949 (2022)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, P., Dai, X., Yang, J., Xiao, B., Yuan, L., Zhang, L., Gao, J.: Multi-scale vision longformer: A new vision transformer for high-resolution image encoding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2998\u20133008 (2021)","DOI":"10.1109\/ICCV48922.2021.00299"},{"key":"1312_CR42","unstructured":"Zheng, H., Wang, J., Zhen, X., Chen, H., Song, J., Zheng, F.: CageViT: convolutional activation guided efficient vision transformer (2023). arXiv preprint. arXiv:2305.09924"},{"key":"1312_CR43","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1312_CR44","doi-asserted-by":"crossref","unstructured":"Pan, X., Ye, T., Xia, Z., Song, S., Huang, G.: Slide-transformer: hierarchical vision transformer with local self-attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2082\u20132091 (2023)","DOI":"10.1109\/CVPR52729.2023.00207"},{"key":"1312_CR45","doi-asserted-by":"crossref","unstructured":"Chattopadhay, A., Sarkar, A., Howlader, P., Balasubramanian, V.N.: Grad-CAM++: generalized gradient-based visual explanations for deep convolutional networks. In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 839\u2013847. IEEE (2018)","DOI":"10.1109\/WACV.2018.00097"},{"key":"1312_CR46","doi-asserted-by":"crossref","unstructured":"Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: MaxViT: multi-axis vision transformer. In: Computer Vision\u2014ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXIV, pp. 459\u2013479. Springer, Berlin (2022)","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"1312_CR47","doi-asserted-by":"crossref","unstructured":"Ding, M., Xiao, B., Codella, N., Luo, P., Wang, J., Yuan, L.: DaViT: dual attention vision transformers. In: Computer Vision\u2014ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXIV, pp. 74\u201392. 
Springer, Berlin (2022)","DOI":"10.1007\/978-3-031-20053-3_5"},{"key":"1312_CR48","unstructured":"Hechen, Z., Huang, W., Zhao, Y.: ViT-LSLA: vision transformer with light self-limited-attention (2022). arXiv preprint. arXiv:2210.17115"},{"key":"1312_CR49","doi-asserted-by":"crossref","unstructured":"Yang, C., Wang, Y., Zhang, J., Zhang, H., Wei, Z., Lin, Z., Yuille, A.: Lite vision transformer with enhanced self-attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11998\u201312008 (2022)","DOI":"10.1109\/CVPR52688.2022.01169"},{"issue":"9","key":"1312_CR50","doi-asserted-by":"publisher","first-page":"10870","DOI":"10.1109\/TPAMI.2023.3268446","volume":"45","author":"T Yao","year":"2023","unstructured":"Yao, T., Li, Y., Pan, Y., Wang, Y., Zhang, X.-P., Mei, T.: Dual vision transformer. IEEE Trans. Pattern Anal. Mach. Intell. 45(9), 10870\u201310882 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1312_CR51","doi-asserted-by":"crossref","unstructured":"Yu, W., Luo, M., Zhou, P., Si, C., Zhou, Y., Wang, X., Feng, J., Yan, S.: MetaFormer is actually what you need for vision. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10819\u201310829 (2022)","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"1312_CR52","doi-asserted-by":"crossref","unstructured":"Pan, J., Bulat, A., Tan, F., Zhu, X., Dudziak, L., Li, H., Tzimiropoulos, G., Martinez, B.: EdgeViTs: competing light-weight CNNs on mobile devices with vision transformers. In: Computer Vision\u2014ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XI, pp. 294\u2013311. Springer, Berlin (2022)","DOI":"10.1007\/978-3-031-20083-0_18"},{"key":"1312_CR53","unstructured":"Li, S., Wang, Z., Liu, Z., Tan, C., Lin, H., Wu, D., Chen, Z., Zheng, J., Li, S.Z.: Efficient multi-order gated aggregation network (2022). arXiv preprint. arXiv:2211.03295"},{"key":"1312_CR54","unstructured":"Yang, C., Qiao, S., Yu, Q., Yuan, X., Zhu, Y., Yuille, A., Adam, H., Chen, L.-C.: MOAT: alternating mobile convolution and attention brings strong vision models (2022). arXiv preprint. arXiv:2210.01820"},{"key":"1312_CR55","doi-asserted-by":"crossref","unstructured":"Yuan, K., Guo, S., Liu, Z., Zhou, A., Yu, F., Wu, W.: Incorporating convolution designs into visual transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 579\u2013588 (2021)","DOI":"10.1109\/ICCV48922.2021.00062"},{"key":"1312_CR56","first-page":"3965","volume":"34","author":"Z Dai","year":"2021","unstructured":"Dai, Z., Liu, H., Le, Q.V., Tan, M.: CoAtNet: marrying convolution and attention for all data sizes. Adv. Neural Inf. Process. Syst. 34, 3965\u20133977 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"1312_CR57","unstructured":"Huang, T., Huang, L., You, S., Wang, F., Qian, C., Xu, C.: LightViT: towards light-weight convolution-free vision transformers (2022). arXiv preprint. arXiv:2207.05557"},{"key":"1312_CR58","unstructured":"Vasu, P.K.A., Gabriel, J., Zhu, J., Tuzel, O., Ranjan, A.: FastViT: a fast hybrid vision transformer using structural reparameterization (2023). arXiv preprint. arXiv:2303.14189"},{"key":"1312_CR59","unstructured":"Cai, H., Gan, C., Han, S.: EfficientViT: enhanced linear attention for high-resolution low-computation visual recognition (2022). arXiv preprint. 
arXiv:2205.14756"},{"key":"1312_CR60","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"1312_CR61","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L.: Microsoft COCO: common objects in context. In: Computer Vision\u2014ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6\u201312, 2014, Proceedings, Part V 13, pp. 740\u2013755. Springer, Berlin (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1312_CR62","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01312-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01312-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01312-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,12]],"date-time":"2024-04-12T13:17:43Z","timestamp":1712927863000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01312-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":62,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2024,4]]}},"alternative-id":["1312"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01312-0","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]},"assertion":[{"value":"31 July 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 March 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 April 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"109"}}