{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T04:41:43Z","timestamp":1774413703189,"version":"3.50.1"},"reference-count":61,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2024,4,23]],"date-time":"2024-04-23T00:00:00Z","timestamp":1713830400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,23]],"date-time":"2024-04-23T00:00:00Z","timestamp":1713830400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276118"],"award-info":[{"award-number":["62276118"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1007\/s00530-024-01322-y","type":"journal-article","created":{"date-parts":[[2024,4,23]],"date-time":"2024-04-23T09:01:39Z","timestamp":1713862899000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["CA-CLIP: category-aware adaptation of CLIP model for few-shot class-incremental learning"],"prefix":"10.1007","volume":"30","author":[{"given":"Yuqiao","family":"Xu","sequence":"first","affiliation":[]},{"given":"Shucheng","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Haoliang","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,23]]},"reference":[{"key":"1322_CR1","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., 
Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1322_CR2","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"1322_CR3","unstructured":"Wang, G., Huang, S., Tao, Z.: Shallow multi-branch attention convolutional neural network for micro-expression recognition. Multimed. Syst. 1\u201314 (2023)"},{"issue":"6","key":"1322_CR4","doi-asserted-by":"publisher","first-page":"3863","DOI":"10.1007\/s00530-023-01164-0","volume":"29","author":"H Zhou","year":"2023","unstructured":"Zhou, H., Huang, S., Xu, Y.: Inceptr: micro-expression recognition integrating inception-cbam and vision transformer. Multimed. Syst. 29(6), 3863\u20133876 (2023)","journal-title":"Multimed. Syst."},{"issue":"12","key":"1322_CR5","doi-asserted-by":"publisher","first-page":"2935","DOI":"10.1109\/TPAMI.2017.2773081","volume":"40","author":"Z Li","year":"2017","unstructured":"Li, Z., Hoiem, D.: Learning without forgetting. IEEE Trans. Pattern Anal. Mach. Intell. 40(12), 2935\u20132947 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1322_CR6","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.-A., Kolesnikov, A., Sperl, G., Lampert, C.H.: icarl: Incremental classifier and representation learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
2001\u20132010 (2017)","DOI":"10.1109\/CVPR.2017.587"},{"issue":"13","key":"1322_CR7","doi-asserted-by":"publisher","first-page":"3521","DOI":"10.1073\/pnas.1611835114","volume":"114","author":"J Kirkpatrick","year":"2017","unstructured":"Kirkpatrick, J., Pascanu, R., Rabinowitz, N., Veness, J., Desjardins, G., Rusu, A.A., Milan, K., Quan, J., Ramalho, T., Grabska-Barwinska, A., et al.: Overcoming catastrophic forgetting in neural networks. Proc. Natl. Acad. Sci. 114(13), 3521\u20133526 (2017)","journal-title":"Proc. Natl. Acad. Sci."},{"key":"1322_CR8","doi-asserted-by":"crossref","unstructured":"Hou, S., Pan, X., Loy, C.C., Wang, Z., Lin, D.: Learning a unified classifier incrementally via rebalancing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 831\u2013839 (2019)","DOI":"10.1109\/CVPR.2019.00092"},{"key":"1322_CR9","doi-asserted-by":"crossref","unstructured":"Yu, L., Twardowski, B., Liu, X., Herranz, L., Wang, K., Cheng, Y., Jui, S., Weijer, J.v.d.: Semantic drift compensation for class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6982\u20136991 (2020)","DOI":"10.1109\/CVPR42600.2020.00701"},{"key":"1322_CR10","doi-asserted-by":"crossref","unstructured":"Douillard, A., Ram\u00e9, A., Couairon, G., Cord, M.: Dytox: Transformers for continual learning with dynamic token expansion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9285\u20139295 (2022)","DOI":"10.1109\/CVPR52688.2022.00907"},{"key":"1322_CR11","doi-asserted-by":"crossref","unstructured":"Tao, Z., Huang, S., Wang, G.: Prototypes sampling mechanism for class incremental learning. 
IEEE Access (2023)","DOI":"10.1109\/ACCESS.2023.3301123"},{"key":"1322_CR12","doi-asserted-by":"crossref","unstructured":"Smith, J.S., Karlinsky, L., Gutta, V., Cascante-Bonilla, P., Kim, D., Arbelle, A., Panda, R., Feris, R., Kira, Z.: Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11909\u201311919 (2023)","DOI":"10.1109\/CVPR52729.2023.01146"},{"key":"1322_CR13","doi-asserted-by":"crossref","unstructured":"Tao, X., Hong, X., Chang, X., Dong, S., Wei, X., Gong, Y.: Few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12183\u201312192 (2020)","DOI":"10.1109\/CVPR42600.2020.01220"},{"key":"1322_CR14","doi-asserted-by":"crossref","unstructured":"Liu, H., Gu, L., Chi, Z., Wang, Y., Yu, Y., Chen, J., Tang, J.: Few-shot class-incremental learning via entropy-regularized data-free replay. In: European Conference on Computer Vision, pp. 146\u2013162. Springer (2022)","DOI":"10.1007\/978-3-031-20053-3_9"},{"key":"1322_CR15","doi-asserted-by":"crossref","unstructured":"Kukleva, A., Kuehne, H., Schiele, B.: Generalized and incremental few-shot learning by explicit learning and calibration without forgetting. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9020\u20139029 (2021)","DOI":"10.1109\/ICCV48922.2021.00889"},{"key":"1322_CR16","doi-asserted-by":"crossref","unstructured":"Cheraghian, A., Rahman, S., Fang, P., Roy, S.K., Petersson, L., Harandi, M.: Semantic-aware knowledge distillation for few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2534\u20132543 (2021)","DOI":"10.1109\/CVPR46437.2021.00256"},{"key":"1322_CR17","unstructured":"Zhao, H., Fu, Y., Kang, M., Tian, Q., Wu, F., Li, X.: Mgsvf: Multi-grained slow vs. 
fast framework for few-shot class-incremental learning. IEEE Trans. Pattern Anal. Mach. Intell. (2021)"},{"key":"1322_CR18","doi-asserted-by":"crossref","unstructured":"Hersche, M., Karunaratne, G., Cherubini, G., Benini, L., Sebastian, A., Rahimi, A.: Constrained few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9057\u20139067 (2022)","DOI":"10.1109\/CVPR52688.2022.00885"},{"key":"1322_CR19","doi-asserted-by":"crossref","unstructured":"Peng, C., Zhao, K., Wang, T., Li, M., Lovell, B.C.: Few-shot class-incremental learning from an open-set perspective. In: European Conference on Computer Vision, pp. 382\u2013397. Springer (2022)","DOI":"10.1007\/978-3-031-19806-9_22"},{"key":"1322_CR20","doi-asserted-by":"crossref","unstructured":"Zhao, L., Lu, J., Xu, Y., Cheng, Z., Guo, D., Niu, Y., Fang, X.: Few-shot class-incremental learning via class-aware bilateral distillation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11838\u201311847 (2023)","DOI":"10.1109\/CVPR52729.2023.01139"},{"key":"1322_CR21","unstructured":"Aky\u00fcrek, A.F., Aky\u00fcrek, E., Wijaya, D.T., Andreas, J.: Subspace regularizers for few-shot class incremental learning. arXiv preprint arXiv:2110.07059 (2021)"},{"key":"1322_CR22","doi-asserted-by":"crossref","unstructured":"Ayub, A., Wagner, A.R.: Cognitively-inspired model for incremental learning using a few examples. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 222\u2013223 (2020)","DOI":"10.1109\/CVPRW50498.2020.00119"},{"key":"1322_CR23","doi-asserted-by":"crossref","unstructured":"Zhu, K., Cao, Y., Zhai, W., Cheng, J., Zha, Z.-J.: Self-promoted prototype refinement for few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
6801\u20136810 (2021)","DOI":"10.1109\/CVPR46437.2021.00673"},{"key":"1322_CR24","doi-asserted-by":"crossref","unstructured":"Zhou, D.-W., Wang, F.-Y., Ye, H.-J., Ma, L., Pu, S., Zhan, D.-C.: Forward compatible few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9046\u20139056 (2022)","DOI":"10.1109\/CVPR52688.2022.00884"},{"key":"1322_CR25","doi-asserted-by":"crossref","unstructured":"Zhang, C., Song, N., Lin, G., Zheng, Y., Pan, P., Xu, Y.: Few-shot incremental learning with continually evolved classifiers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12455\u201312464 (2021)","DOI":"10.1109\/CVPR46437.2021.01227"},{"key":"1322_CR26","doi-asserted-by":"crossref","unstructured":"Song, Z., Zhao, Y., Shi, Y., Peng, P., Yuan, L., Tian, Y.: Learning with fantasy: semantic-aware virtual contrastive constraint for few-shot class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 24183\u201324192 (2023)","DOI":"10.1109\/CVPR52729.2023.02316"},{"key":"1322_CR27","first-page":"6747","volume":"34","author":"G Shi","year":"2021","unstructured":"Shi, G., Chen, J., Zhang, W., Zhan, L.-M., Wu, X.-M.: Overcoming catastrophic forgetting in incremental few-shot learning by finding flat minima. Adv Neural Inf Process Syst 34, 6747\u20136761 (2021)","journal-title":"Adv Neural Inf Process Syst"},{"key":"1322_CR28","unstructured":"Kim, D.-Y., Han, D.-J., Seo, J., Moon, J.: Warping the space: weight space rotation for class-incremental few-shot learning. In: The Eleventh International Conference on Learning Representations (2023)"},{"key":"1322_CR29","doi-asserted-by":"crossref","unstructured":"Chi, Z., Gu, L., Liu, H., Wang, Y., Yu, Y., Tang, J.: Metafscil: a meta-learning approach for few-shot class incremental learning. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14166\u201314175 (2022)","DOI":"10.1109\/CVPR52688.2022.01377"},{"key":"1322_CR30","unstructured":"Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"1322_CR31","unstructured":"Jia, C., Yang, Y., Xia, Y., Chen, Y.-T., Parekh, Z., Pham, H., Le, Q., Sung, Y.-H., Li, Z., Duerig, T.: Scaling up visual and vision-language representation learning with noisy text supervision. In: International Conference on Machine Learning, pp. 4904\u20134916. PMLR (2021)"},{"key":"1322_CR32","unstructured":"Li, Y., Liang, F., Zhao, L., Cui, Y., Ouyang, W., Shao, J., Yu, F., Yan, J.: Supervision exists everywhere: a data efficient contrastive language-image pre-training paradigm. arXiv preprint arXiv:2110.05208 (2021)"},{"key":"1322_CR33","doi-asserted-by":"crossref","unstructured":"Tschannen, M., Mustafa, B., Houlsby, N.: Clippo: Image-and-language understanding from pixels only. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11006\u201311017 (2023)","DOI":"10.1109\/CVPR52729.2023.01059"},{"key":"1322_CR34","unstructured":"Thengane, V., Khan, S., Hayat, M., Khan, F.: Clip model is an efficient continual learner. arXiv preprint arXiv:2210.03114 (2022)"},{"key":"1322_CR35","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Conditional prompt learning for vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
16816\u201316825 (2022)","DOI":"10.1109\/CVPR52688.2022.01631"},{"issue":"9","key":"1322_CR36","doi-asserted-by":"publisher","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","volume":"130","author":"K Zhou","year":"2022","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. Int. J. Comput. Vis. 130(9), 2337\u20132348 (2022)","journal-title":"Int. J. Comput. Vis."},{"key":"1322_CR37","doi-asserted-by":"crossref","unstructured":"Gao, P., Geng, S., Zhang, R., Ma, T., Fang, R., Zhang, Y., Li, H., Qiao, Y.: Clip-adapter: Better vision-language models with feature adapters. Int. J. Comput. Vis. 1\u201315 (2023)","DOI":"10.1007\/s11263-023-01891-x"},{"key":"1322_CR38","unstructured":"Finn, C., Abbeel, P., Levine, S.: Model-agnostic meta-learning for fast adaptation of deep networks. In: International Conference on Machine Learning, pp. 1126\u20131135. PMLR (2017)"},{"key":"1322_CR39","unstructured":"Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images (2009)"},{"key":"1322_CR40","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vis. 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"1322_CR41","unstructured":"Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S.: The caltech-ucsd birds-200-2011 dataset (2011)"},{"key":"1322_CR42","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
16000\u201316009 (2022)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"1322_CR43","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning, pp. 1597\u20131607. PMLR (2020)"},{"key":"1322_CR44","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"1322_CR45","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"1322_CR46","doi-asserted-by":"crossref","unstructured":"Guo, Z., Dong, B., Ji, Z., Bai, J., Guo, Y., Zuo, W.: Texts as images in prompt tuning for multi-label image recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2808\u20132817 (2023)","DOI":"10.1109\/CVPR52729.2023.00275"},{"key":"1322_CR47","doi-asserted-by":"crossref","unstructured":"Zareian, A., Rosa, K.D., Hu, D.H., Chang, S.-F.: Open-vocabulary object detection using captions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14393\u201314402 (2021)","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"1322_CR48","doi-asserted-by":"crossref","unstructured":"Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European Conference on Computer Vision, pp. 736\u2013753. Springer (2022)","DOI":"10.1007\/978-3-031-19818-2_42"},{"key":"1322_CR49","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Adv. Neural Inf. Process. Syst. 
28 (2015)"},{"key":"1322_CR50","doi-asserted-by":"crossref","unstructured":"Thrun, S., Pratt, L.: Learning to learn: Introduction and overview. Learn. Learn 3\u201317 (1998)","DOI":"10.1007\/978-1-4615-5529-2_1"},{"key":"1322_CR51","unstructured":"Santoro, A., Bartunov, S., Botvinick, M., Wierstra, D., Lillicrap, T.: Meta-learning with memory-augmented neural networks. In: International Conference on Machine Learning, pp. 1842\u20131850. PMLR (2016)"},{"key":"1322_CR52","unstructured":"Ravi, S., Larochelle, H.: Optimization as a model for few-shot learning. In: International Conference on Learning Representations (2016)"},{"key":"1322_CR53","unstructured":"Koch, G., Zemel, R., Salakhutdinov, R., et al.: Siamese neural networks for one-shot image recognition. In: ICML Deep Learning Workshop, vol. 2. Lille (2015)"},{"key":"1322_CR54","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1016\/j.neunet.2019.03.010","volume":"116","author":"D Maltoni","year":"2019","unstructured":"Maltoni, D., Lomonaco, V.: Continuous learning in single-incremental-task scenarios. Neural Netw. 116, 56\u201373 (2019)","journal-title":"Neural Netw."},{"key":"1322_CR55","doi-asserted-by":"crossref","unstructured":"Volpi, R., Larlus, D., Rogez, G.: Continual adaptation of visual representations via domain randomization and meta-learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4443\u20134453 (2021)","DOI":"10.1109\/CVPR46437.2021.00442"},{"key":"1322_CR56","doi-asserted-by":"crossref","unstructured":"Caron, M., Touvron, H., Misra, I., J\u00e9gou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
9650\u20139660 (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"1322_CR57","doi-asserted-by":"crossref","unstructured":"Zhou, D.-W., Ye, H.-J., Ma, L., Xie, D., Pu, S., Zhan, D.-C.: Few-shot class-incremental learning by sampling multi-phase tasks. IEEE Trans. Pattern Anal. Mach. Intell. (2022)","DOI":"10.1109\/CVPR52688.2022.00884"},{"key":"1322_CR58","doi-asserted-by":"crossref","unstructured":"Zhuang, H., Weng, Z., He, R., Lin, Z., Zeng, Z.: Gkeal: Gaussian kernel embedded analytic learning for few-shot class incremental task. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7746\u20137755 (2023)","DOI":"10.1109\/CVPR52729.2023.00748"},{"key":"1322_CR59","doi-asserted-by":"crossref","unstructured":"Mazumder, P., Singh, P., Rai, P.: Few-shot lifelong learning. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 2337\u20132345 (2021)","DOI":"10.1609\/aaai.v35i3.16334"},{"key":"1322_CR60","unstructured":"Yang, Y., Yuan, H., Li, X., Lin, Z., Torr, P., Tao, D.: Neural collapse inspired feature-classifier alignment for few-shot class incremental learning. In: The Eleventh International Conference on Learning Representations (2023)"},{"key":"1322_CR61","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, Z., Du, M., Yang, F., Zhang, Z., Ding, S., Mardziel, P., Hu, X.: Score-cam: Score-weighted visual explanations for convolutional neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 
24\u201325 (2020)","DOI":"10.1109\/CVPRW50498.2020.00020"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01322-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01322-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01322-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,5]],"date-time":"2024-07-05T17:13:43Z","timestamp":1720199623000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01322-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,23]]},"references-count":61,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2024,6]]}},"alternative-id":["1322"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01322-y","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,23]]},"assertion":[{"value":"19 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 March 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 April 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflict of interest to declare that are relevant to the content of this 
article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"130"}}