{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T04:57:31Z","timestamp":1775797051039,"version":"3.50.1"},"reference-count":52,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2023,8,31]],"date-time":"2023-08-31T00:00:00Z","timestamp":1693440000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,8,31]],"date-time":"2023-08-31T00:00:00Z","timestamp":1693440000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Postgraduate Research and Practice Innovation Program of Jiangsu Province","award":["KYCX22_3853"],"award-info":[{"award-number":["KYCX22_3853"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62276118"],"award-info":[{"award-number":["62276118"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s00530-023-01164-0","type":"journal-article","created":{"date-parts":[[2023,8,31]],"date-time":"2023-08-31T05:02:34Z","timestamp":1693458154000},"page":"3863-3876","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":37,"title":["Inceptr: micro-expression recognition integrating inception-CBAM and vision 
transformer"],"prefix":"10.1007","volume":"29","author":[{"given":"Haoliang","family":"Zhou","sequence":"first","affiliation":[]},{"given":"Shucheng","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Yuqiao","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,31]]},"reference":[{"issue":"2","key":"1164_CR1","doi-asserted-by":"publisher","first-page":"124","DOI":"10.1037\/h0030377","volume":"17","author":"P Ekman","year":"1971","unstructured":"Ekman, P., Friesen, W.V.: Constants across cultures in the face and emotion. J. Pers. Soc. Psychol. 17(2), 124 (1971)","journal-title":"J. Pers. Soc. Psychol."},{"issue":"9","key":"1164_CR2","first-page":"5826","volume":"44","author":"X Ben","year":"2021","unstructured":"Ben, X., Ren, Y., Zhang, J., Wang, S.-J., Kpalma, K., Meng, W., Liu, Y.-J.: Video-based facial micro-expression analysis: A survey of datasets, features and algorithms. IEEE Trans. Pattern Anal. Mach. Intell. 44(9), 5826\u20135846 (2021)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"6","key":"1164_CR3","doi-asserted-by":"publisher","first-page":"915","DOI":"10.1109\/TPAMI.2007.1110","volume":"29","author":"G Zhao","year":"2007","unstructured":"Zhao, G., Pietikainen, M.: Dynamic texture recognition using local binary patterns with an application to facial expressions. IEEE Trans. Pattern Anal. Mach. Intell. 29(6), 915\u2013928 (2007)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"4","key":"1164_CR4","doi-asserted-by":"publisher","first-page":"299","DOI":"10.1109\/TAFFC.2015.2485205","volume":"7","author":"Y-J Liu","year":"2015","unstructured":"Liu, Y.-J., Zhang, J.-K., Yan, W.-J., Wang, S.-J., Zhao, G., Fu, X.: A main directional mean optical flow feature for spontaneous micro-expression recognition. IEEE Trans. Affect. Comput. 7(4), 299\u2013310 (2015)","journal-title":"IEEE Trans. Affect. 
Comput."},{"key":"1164_CR5","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1016\/j.image.2017.11.006","volume":"62","author":"S-T Liong","year":"2018","unstructured":"Liong, S.-T., See, J., Wong, K., Phan, R.C.-W.: Less is more: micro-expression recognition from video using apex frame. Signal Process. Image Commun. 62, 82\u201392 (2018)","journal-title":"Signal Process. Image Commun."},{"key":"1164_CR6","doi-asserted-by":"crossref","unstructured":"Zhou, L., Mao, Q., Xue, L.: Dual-inception network for cross-database micro-expression recognition. In: 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pp. 1\u20135 (2019). IEEE","DOI":"10.1109\/FG.2019.8756579"},{"key":"1164_CR7","doi-asserted-by":"crossref","unstructured":"Liong, S.-T., Gan, Y.S., See, J., Khor, H.-Q., Huang, Y.-C.: Shallow triple stream three-dimensional cnn (ststnet) for micro-expression recognition. In: 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pp. 1\u20135 (2019). IEEE","DOI":"10.1109\/FG.2019.8756567"},{"key":"1164_CR8","unstructured":"Li, H., Sui, M., Zhao, F., Zha, Z., Wu, F.: Mvt: mask vision transformer for facial expression recognition in the wild. arXiv preprint arXiv:2106.04520 (2021)"},{"key":"1164_CR9","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3122146","author":"F Ma","year":"2021","unstructured":"Ma, F., Sun, B., Li, S.: Facial expression recognition with visual transformers and attentional selective fusion. IEEE Trans. Affect. Comput. (2021). https:\/\/doi.org\/10.1109\/TAFFC.2021.3122146","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"3","key":"1164_CR10","doi-asserted-by":"publisher","first-page":"460","DOI":"10.3390\/e25030460","volume":"25","author":"H Zhou","year":"2023","unstructured":"Zhou, H., Huang, S., Li, J., Wang, S.-J.: Dual-atme: dual-branch attention network for micro-expression recognition. 
Entropy 25(3), 460 (2023)","journal-title":"Entropy"},{"key":"1164_CR11","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929","author":"A Dosovitskiy","year":"2020","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv Preprint (2020). https:\/\/doi.org\/10.48550\/arXiv.2010.11929","journal-title":"arXiv Preprint"},{"key":"1164_CR12","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357 (2021). PMLR"},{"key":"1164_CR13","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1164_CR14","doi-asserted-by":"crossref","unstructured":"Xue, F., Wang, Q., Guo, G.: Transfer: Learning relation-aware facial expression representations with transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3601\u20133610 (2021)","DOI":"10.1109\/ICCV48922.2021.00358"},{"issue":"4","key":"1164_CR15","doi-asserted-by":"publisher","first-page":"1973","DOI":"10.1109\/TAFFC.2022.3213509","volume":"13","author":"L Zhang","year":"2022","unstructured":"Zhang, L., Hong, X., Arandjelovi\u0107, O., Zhao, G.: Short and long range relation based spatio-temporal transformer for micro-expression recognition. IEEE Trans. Affect. Comput. 13(4), 1973\u20131985 (2022)","journal-title":"IEEE Trans. Affect. 
Comput."},{"key":"1164_CR16","unstructured":"Ran, R., Shi, K., Jiang, X., Wang, N.: Micro-expression recognition method based on dual attention crossvit. J. Nanjing Univ. Inform. Eng. 1\u201311 (2023). http:\/\/kns.cnki.net\/kcms\/detail\/32.1801.N.20230214.0837.002.html. Accessed 28 Aug 2023"},{"key":"1164_CR17","doi-asserted-by":"crossref","unstructured":"Li, X., Pfister, T., Huang, X., Zhao, G., Pietik\u00e4inen, M.: A spontaneous micro-expression database: Inducement, collection and baseline. In: 2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (fg), pp. 1\u20136 (2013). IEEE","DOI":"10.1109\/FG.2013.6553717"},{"issue":"1","key":"1164_CR18","doi-asserted-by":"publisher","first-page":"86041","DOI":"10.1371\/journal.pone.0086041","volume":"9","author":"W-J Yan","year":"2014","unstructured":"Yan, W.-J., Li, X., Wang, S.-J., Zhao, G., Liu, Y.-J., Chen, Y.-H., Fu, X.: Casme ii: an improved spontaneous micro-expression database and the baseline evaluation. PLoS ONE 9(1), 86041 (2014)","journal-title":"PLoS ONE"},{"issue":"01","key":"1164_CR19","doi-asserted-by":"publisher","first-page":"116","DOI":"10.1109\/TAFFC.2016.2573832","volume":"9","author":"AK Davison","year":"2018","unstructured":"Davison, A.K., Lansley, C., Costen, N., Tan, K., Yap, M.H.: Samm: a spontaneous micro-facial movement dataset. IEEE Trans. Affect. Comput. 9(01), 116\u2013129 (2018)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"3","key":"1164_CR20","first-page":"2782","volume":"45","author":"J Li","year":"2023","unstructured":"Li, J., Dong, Z., Lu, S., Wang, S.-J., Yan, W.-J., Ma, Y., Liu, Y., Huang, C., Fu, X.: Cas(me)$$^{3}$$: a third generation facial spontaneous micro-expression database with depth information and high ecological validity. IEEE Trans. Pattern Anal. Mach. Intell. 45(3), 2782\u20132800 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"1164_CR21","doi-asserted-by":"crossref","unstructured":"See, J., Yap, M.H., Li, J., Hong, X., Wang, S.-J.: Megc 2019\u2013the second facial micro-expressions grand challenge. In: 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pp. 1\u20135 (2019). IEEE","DOI":"10.1109\/FG.2019.8756611"},{"issue":"4","key":"1164_CR22","doi-asserted-by":"publisher","first-page":"601","DOI":"10.1007\/s11265-017-1276-0","volume":"90","author":"S-T Liong","year":"2018","unstructured":"Liong, S.-T., See, J., Phan, R.C.-W., Wong, K., Tan, S.-W.: Hybrid facial regions extraction for micro-expression recognition system. J. Signal Process. Syst. 90(4), 601\u2013617 (2018)","journal-title":"J. Signal Process. Syst."},{"key":"1164_CR23","doi-asserted-by":"publisher","first-page":"564","DOI":"10.1016\/j.neucom.2015.10.096","volume":"175","author":"X Huang","year":"2016","unstructured":"Huang, X., Zhao, G., Hong, X., Zheng, W., Pietik\u00e4inen, M.: Spontaneous facial micro-expression analysis using spatiotemporal completed local quantized patterns. Neurocomputing 175, 564\u2013578 (2016)","journal-title":"Neurocomputing"},{"issue":"1","key":"1164_CR24","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1109\/TAFFC.2017.2713359","volume":"10","author":"X Huang","year":"2017","unstructured":"Huang, X., Wang, S.-J., Liu, X., Zhao, G., Feng, X., Pietik\u00e4inen, M.: Discriminative spatiotemporal local binary pattern with revisited integral projection for spontaneous facial micro-expression recognition. IEEE Trans. Affect. Comput. 10(1), 32\u201347 (2017)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"1164_CR25","doi-asserted-by":"publisher","first-page":"129","DOI":"10.1016\/j.image.2019.02.005","volume":"74","author":"YS Gan","year":"2019","unstructured":"Gan, Y.S., Liong, S.-T., Yau, W.-C., Huang, Y.-C., Tan, L.-K.: Off-apexnet on micro-expression recognition system. Signal Process. Image Commun. 
74, 129\u2013139 (2019)","journal-title":"Signal Process. Image Commun."},{"key":"1164_CR26","doi-asserted-by":"publisher","first-page":"8590","DOI":"10.1109\/TIP.2020.3018222","volume":"29","author":"Z Xia","year":"2020","unstructured":"Xia, Z., Peng, W., Khor, H.-Q., Feng, X., Zhao, G.: Revealing the invisible with model and data shrinking for composite-database micro-expression recognition. IEEE Trans. Image Process. 29, 8590\u20138605 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"1164_CR27","doi-asserted-by":"publisher","DOI":"10.1155\/2021\/7799100","author":"Y Wang","year":"2021","unstructured":"Wang, Y., Huang, Y., Liu, C., Gu, X., Yang, D., Wang, S., Zhang, B.: Micro expression recognition via dual-stream spatiotemporal attention network. J. Healthc. Eng. (2021). https:\/\/doi.org\/10.1155\/2021\/7799100","journal-title":"J. Healthc. Eng."},{"key":"1164_CR28","doi-asserted-by":"publisher","first-page":"1345","DOI":"10.1109\/TMM.2022.3141616","volume":"25","author":"B Chen","year":"2022","unstructured":"Chen, B., Liu, K.-H., Xu, Y., Wu, Q.-Q., Yao, J.-F.: Block division convolutional network with implicit deep features augmentation for micro-expression recognition. IEEE Trans. Multimed. 25, 1345\u201358 (2022)","journal-title":"IEEE Trans. Multimed."},{"key":"1164_CR29","first-page":"1","volume":"7","author":"G Wang","year":"2023","unstructured":"Wang, G., Huang, S., Tao, Z.: Shallow multi-branch attention convolutional neural network for micro-expression recognition. Multim. Syst. 7, 1\u201314 (2023)","journal-title":"Multim. Syst."},{"key":"1164_CR30","doi-asserted-by":"crossref","unstructured":"Van\u00a0Quang, N., Chun, J., Tokuyama, T.: Capsulenet for micro-expression recognition. In: 2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019), pp. 1\u20137 (2019). 
IEEE","DOI":"10.1109\/FG.2019.8756544"},{"issue":"2","key":"1164_CR31","doi-asserted-by":"publisher","first-page":"502","DOI":"10.1109\/TMM.2019.2928494","volume":"22","author":"P Rodriguez","year":"2019","unstructured":"Rodriguez, P., Velazquez, D., Cucurull, G., Gonfaus, J.M., Roca, F.X., Gonzalez, J.: Pay attention to the activations: a modular attention mechanism for fine-grained image recognition. IEEE Trans. Multim. 22(2), 502\u2013514 (2019)","journal-title":"IEEE Trans. Multim."},{"key":"1164_CR32","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 3\u201319 (2018)","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"1164_CR33","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1164_CR34","doi-asserted-by":"publisher","first-page":"427","DOI":"10.1016\/j.neunet.2022.06.024","volume":"153","author":"S Zhao","year":"2022","unstructured":"Zhao, S., Tang, H., Liu, S., Zhang, Y., Wang, H., Xu, T., Chen, E., Guan, C.: Me-plan: a deep prototypical learning with local attention network for dynamic micro-expression recognition. Neural Netw. 153, 427\u2013443 (2022)","journal-title":"Neural Netw."},{"key":"1164_CR35","doi-asserted-by":"publisher","first-page":"110149","DOI":"10.1109\/ACCESS.2022.3214808","volume":"10","author":"G Wang","year":"2022","unstructured":"Wang, G., Huang, S., Dong, Z.: Haphazard cuboids feature extraction for micro-expression recognition. 
IEEE Access 10, 110149\u2013110162 (2022)","journal-title":"IEEE Access"},{"key":"1164_CR36","doi-asserted-by":"crossref","unstructured":"Su, Y., Zhang, J., Liu, J., Zhai, G.: Key facial components guided micro-expression recognition based on first & second-order motion. In: 2021 IEEE International Conference on Multimedia and Expo (ICME), pp. 1\u20136 (2021). IEEE","DOI":"10.1109\/ICME51207.2021.9428407"},{"key":"1164_CR37","doi-asserted-by":"crossref","unstructured":"Li, H., Sui, M., Zhu, Z., Zhao, F.: Mmnet: Muscle motion-guided network for micro-expression recognition. arXiv preprint arXiv:2201.05297 (2022)","DOI":"10.24963\/ijcai.2022\/150"},{"issue":"4","key":"1164_CR38","first-page":"1973","volume":"13","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141, Polosukhin, I.: Attention is all you need. Adv. in Neural Inform. Process. Syst. 13(4), 1973 (2017)","journal-title":"Adv. in Neural Inform. Process. Syst."},{"key":"1164_CR39","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16000\u201316009 (2022)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"1164_CR40","doi-asserted-by":"crossref","unstructured":"Xue, F., Wang, Q., Tan, Z., Ma, Z., Guo, G.: Vision transformer with attentive pooling for robust facial expression recognition. IEEE Trans. Affect. Comput. (2022)","DOI":"10.1109\/TAFFC.2022.3226473"},{"key":"1164_CR41","doi-asserted-by":"crossref","unstructured":"Chen, C.-F.R., Fan, Q., Panda, R.: Crossvit: Cross-attention multi-scale vision transformer for image classification. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
357\u2013366 (2021)","DOI":"10.1109\/ICCV48922.2021.00041"},{"key":"1164_CR42","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King, D.E.: Dlib-ml: a machine learning toolkit. J. Mach. Learn. Res. 10, 1755\u20131758 (2009)","journal-title":"J. Mach. Learn. Res."},{"key":"1164_CR43","doi-asserted-by":"crossref","unstructured":"Zach, C., Pock, T., Bischof, H.: A duality based approach for realtime tv-l 1 optical flow. In: Joint Pattern Recognition Symposium, pp. 214\u2013223 (2007). Springer","DOI":"10.1007\/978-3-540-74936-3_22"},{"key":"1164_CR44","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A.: Going deeper with convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"1164_CR45","doi-asserted-by":"crossref","unstructured":"Yan, W.-J., Wang, S.-J., Chen, Y.-H., Zhao, G., Fu, X.: Quantifying micro-expressions with constraint local model and local binary pattern. In: European Conference on Computer Vision, pp. 296\u2013305 (2014). Springer","DOI":"10.1007\/978-3-319-16178-5_20"},{"issue":"3","key":"1164_CR46","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1109\/TMM.2019.2931351","volume":"22","author":"Z Xia","year":"2019","unstructured":"Xia, Z., Hong, X., Gao, X., Feng, X., Zhao, G.: Spatiotemporal recurrent convolutional networks for recognizing spontaneous micro-expressions. IEEE Trans. Multim. 22(3), 626\u2013640 (2019)","journal-title":"IEEE Trans. Multim."},{"key":"1164_CR47","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2020.3023821","author":"J Li","year":"2020","unstructured":"Li, J., Soladie, C., Seguier, R.: Local temporal pattern and data augmentation for micro-expression spotting. IEEE Trans. Affect. Comput. (2020). https:\/\/doi.org\/10.1109\/TAFFC.2020.3023821","journal-title":"IEEE Trans. 
Affect. Comput."},{"key":"1164_CR48","doi-asserted-by":"crossref","unstructured":"Huang, L., Wang, W., Chen, J., Wei, X.-Y.: Attention on attention for image captioning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4634\u20134643 (2019)","DOI":"10.1109\/ICCV.2019.00473"},{"key":"1164_CR49","doi-asserted-by":"crossref","unstructured":"Melacci, S., Sarti, L., Maggini, M., Bianchini, M.: A neural network approach to similarity learning. In: IAPR Workshop on Artificial Neural Networks in Pattern Recognition, pp. 133\u2013136 (2008). Springer","DOI":"10.1007\/978-3-540-69939-2_13"},{"key":"1164_CR50","doi-asserted-by":"crossref","unstructured":"Peng, M., Wang, C., Bi, T., Shi, Y., Zhou, X., Chen, T.: A novel apex-time network for cross-dataset micro-expression recognition. In: 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1\u20136 (2019). IEEE","DOI":"10.1109\/ACII.2019.8925525"},{"key":"1164_CR51","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.neucom.2020.10.082","volume":"427","author":"X Nie","year":"2021","unstructured":"Nie, X., Takalkar, M.A., Duan, M., Zhang, H., Xu, M.: Geme: Dual-stream multi-task gender-based micro-expression recognition. Neurocomputing 427, 13\u201328 (2021)","journal-title":"Neurocomputing"},{"issue":"3","key":"1164_CR52","first-page":"56","volume":"5","author":"P Eckman","year":"1978","unstructured":"Eckman, P., Friesen, W.: Facial action coding system (facs): a technique for the measurement of facial action. Environ. Psychol. Nonverbal Bahav. 5(3), 56\u201375 (1978)","journal-title":"Environ. Psychol. 
Nonverbal Bahav."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01164-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-023-01164-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01164-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,16]],"date-time":"2023-11-16T11:13:51Z","timestamp":1700133231000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-023-01164-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,31]]},"references-count":52,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["1164"],"URL":"https:\/\/doi.org\/10.1007\/s00530-023-01164-0","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,8,31]]},"assertion":[{"value":"10 April 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 August 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 August 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
interest"}}]}}