{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T13:29:26Z","timestamp":1768483766287,"version":"3.49.0"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,1,29]],"date-time":"2024-01-29T00:00:00Z","timestamp":1706486400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,29]],"date-time":"2024-01-29T00:00:00Z","timestamp":1706486400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61972187"],"award-info":[{"award-number":["61972187"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Natural Science Foundation of Fujian Province, China","award":["2020J02024"],"award-info":[{"award-number":["2020J02024"]}]},{"name":"Project of the 14th Five Year Plan of Education Studies, Fujian Province","award":["FJJKBK21-197"],"award-info":[{"award-number":["FJJKBK21-197"]}]},{"name":"Key Project of Educational Reform in Minjiang University","award":["MJUJG2021A008"],"award-info":[{"award-number":["MJUJG2021A008"]}]},{"name":"Project of The Development of Core Values throughout the Curriculum in Minjiang University","award":["MJU2021KC512"],"award-info":[{"award-number":["MJU2021KC512"]}]},{"name":"Humanities and Social Science Fund of the Ministry of Education","award":["21YJC740082"],"award-info":[{"award-number":["21YJC740082"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,2]]},"DOI":"10.1007\/s00530-023-01230-7","type":"journal-article","created":{"date-parts":[[2024,1,29]],"date-time":"2024-01-29T01:03:38Z","timestamp":1706490218000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["BENet: bi-directional enhanced network for image captioning"],"prefix":"10.1007","volume":"30","author":[{"given":"Peixin","family":"Yan","sequence":"first","affiliation":[]},{"given":"Zuoyong","family":"Li","sequence":"additional","affiliation":[]},{"given":"Rong","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Xinrong","family":"Cao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,29]]},"reference":[{"issue":"3","key":"1230_CR1","doi-asserted-by":"crossref","first-page":"1043","DOI":"10.1007\/s00530-022-01036-z","volume":"29","author":"J Li","year":"2023","unstructured":"Li, J., Wang, Y., Zhao, D.: Layer-wise enhanced transformer with multi-modal fusion for image caption. Multimedia Syst. 29(3), 1043\u20131056 (2023)","journal-title":"Multimedia Syst."},{"issue":"3","key":"1230_CR2","doi-asserted-by":"crossref","first-page":"1665","DOI":"10.1007\/s00530-022-00937-3","volume":"29","author":"T Carmo Nogueira","year":"2023","unstructured":"Carmo Nogueira, T., Vinhal, C.D.N., Cruz J\u00fanior, G., Ullmann, M.R.D., Marques, T.C.: A reference-based model using deep learning for image captioning. Multimedia Syst. 
29(3), 1665\u20131681 (2023)","journal-title":"Multimedia Syst."},{"issue":"3","key":"1230_CR3","doi-asserted-by":"crossref","first-page":"2706","DOI":"10.1007\/s10489-022-03624-y","volume":"53","author":"J Wei","year":"2023","unstructured":"Wei, J., Li, Z., Zhu, J., Ma, H.: Enhance understanding and reasoning ability for image captioning. Appl. Intell. 53(3), 2706\u20132722 (2023)","journal-title":"Appl. Intell."},{"issue":"5","key":"1230_CR4","first-page":"5910","volume":"53","author":"Z Lian","year":"2023","unstructured":"Lian, Z., Zhang, Y., Li, H., Wang, R., Hu, X.: Cross modification attention-based deliberation model for image captioning. Appl. Intell. 53(5), 5910\u20135933 (2023)","journal-title":"Appl. Intell."},{"key":"1230_CR5","first-page":"15465","volume-title":"RSTNet: captioning with adaptive attention on visual and non-visual words","author":"X Zhang","year":"2021","unstructured":"Zhang, X., Sun, X., Luo, Y., Ji, J., Zhou, Y., Wu, Y., Huang, F., Ji, R.: RSTNet: captioning with adaptive attention on visual and non-visual words. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 15465\u201315474. IEEE (2021)"},{"key":"1230_CR6","volume-title":"Advances in Neural Information Processing Systems","author":"I Sutskever","year":"2014","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. In: Advances in Neural Information Processing Systems, vol. 27. Curran Associates Inc. (2014)"},{"key":"1230_CR7","unstructured":"Mao, J., Xu, W., Yang, Y., Wang, J., Yuille, A.L.: Explain images with multimodal recurrent neural networks (2014). arXiv preprint. arXiv:1410.1090"},{"key":"1230_CR8","first-page":"2048","volume-title":"International Conference on Machine Learning","author":"K Xu","year":"2015","unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., Bengio, Y.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057. PMLR (2015)"},{"key":"1230_CR9","first-page":"4651","volume-title":"Image captioning with semantic attention","author":"Q You","year":"2016","unstructured":"You, Q., Jin, H., Wang, Z., Fang, C., Luo, J.: Image captioning with semantic attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4651\u20134659. IEEE (2016)"},{"key":"1230_CR10","first-page":"375","volume-title":"Knowing when to look: Adaptive attention via a visual sentinel for image captioning","author":"J Lu","year":"2017","unstructured":"Lu, J., Xiong, C., Parikh, D., Socher, R.: Knowing when to look: Adaptive attention via a visual sentinel for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 375\u2013383. IEEE (2017)"},{"issue":"2","key":"1230_CR11","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3177745","volume":"14","author":"M Cornia","year":"2018","unstructured":"Cornia, M., Baraldi, L., Serra, G., Cucchiara, R.: Paying more attention to saliency: image captioning with saliency and context attention. ACM Trans. Multimedia Comput. Commun. Appl. (TOMM) 14(2), 1\u201321 (2018)","journal-title":"ACM Trans. Multimedia Comput. Commun. Appl. 
(TOMM)"},{"key":"1230_CR12","first-page":"8307","volume-title":"Show, control and tell: a framework for generating controllable and grounded captions","author":"M Cornia","year":"2019","unstructured":"Cornia, M., Baraldi, L., Cucchiara, R.: Show, control and tell: a framework for generating controllable and grounded captions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8307\u20138316. IEEE (2019)"},{"key":"1230_CR13","first-page":"8367","volume-title":"Look back and predict forward in image captioning","author":"Y Qin","year":"2019","unstructured":"Qin, Y., Du, J., Zhang, Y., Lu, H.: Look back and predict forward in image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8367\u20138375. IEEE (2019)"},{"issue":"1\u20133","key":"1230_CR14","doi-asserted-by":"crossref","first-page":"17","DOI":"10.1080\/135062800394667","volume":"7","author":"RA Rensink","year":"2000","unstructured":"Rensink, R.A.: The dynamic representation of scenes. Vis. Cogn. 7(1\u20133), 17\u201342 (2000)","journal-title":"Vis. Cogn."},{"key":"1230_CR15","volume-title":"Advances in Neural Information Processing Systems","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30. Curran Associates Inc. (2017)"},{"key":"1230_CR16","first-page":"4634","volume-title":"X.-Y.: Attention on attention for image captioning","author":"L Huang","year":"2019","unstructured":"Huang, L., Wang, W., Chen, J., Wei,  X-Y.: Attention on attention for image captioning. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4634\u20134643. IEEE (2019)"},{"key":"1230_CR17","first-page":"10578","volume-title":"Meshed-memory transformer for image captioning","author":"M Cornia","year":"2020","unstructured":"Cornia, M., Stefanini, M., Baraldi, L., Cucchiara, R.: Meshed-memory transformer for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 10578\u201310587. IEEE (2020)"},{"key":"1230_CR18","first-page":"10971","volume-title":"X-linear attention networks for image captioning","author":"Y Pan","year":"2020","unstructured":"Pan, Y., Yao, T., Li, Y., Mei, T.: X-linear attention networks for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 10971\u201310980. IEEE (2020)"},{"key":"1230_CR19","first-page":"10267","volume-title":"In defense of grid features for visual question answering","author":"H Jiang","year":"2020","unstructured":"Jiang, H., Misra, I., Rohrbach, M., Learned-Miller, E., Chen, X.: In defense of grid features for visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 10267\u201310276. IEEE (2020)"},{"key":"1230_CR20","first-page":"1317","volume-title":"Memory-augmented image captioning","author":"Z Fei","year":"2021","unstructured":"Fei, Z.: Memory-augmented image captioning.  In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 1317\u20131324. AAAI Press (2021)"},{"key":"1230_CR21","first-page":"4125","volume-title":"Unsupervised image captioning","author":"Y Feng","year":"2019","unstructured":"Feng, Y., Ma, L., Liu, W., Luo, J.: Unsupervised image captioning. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4125\u20134134. IEEE (2019)"},{"key":"1230_CR22","first-page":"15","volume-title":"Every picture tells a story: generating sentences from images","author":"A Farhadi","year":"2010","unstructured":"Farhadi, A., Hejrati, M., Sadeghi, M.A., Young, P., Rashtchian, C., Hockenmaier, J., Forsyth, D.: Every picture tells a story: generating sentences from images. In: European Conference on Computer Vision, pp. 15\u201329. Springer (2010)"},{"key":"1230_CR23","first-page":"529","volume-title":"Improving image-sentence embeddings using large weakly annotated photo collections","author":"Y Gong","year":"2014","unstructured":"Gong, Y., Wang, L., Hodosh, M., Hockenmaier, J., Lazebnik, S.: Improving image-sentence embeddings using large weakly annotated photo collections. In: European Conference on Computer Vision, pp. 529\u2013545. Springer (2014)"},{"key":"1230_CR24","first-page":"3156","volume-title":"Show and tell: a neural image caption generator","author":"O Vinyals","year":"2015","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator.  In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3156\u20133164. IEEE (2015)"},{"issue":"3","key":"1230_CR25","doi-asserted-by":"crossref","first-page":"201","DOI":"10.1038\/nrn755","volume":"3","author":"M Corbetta","year":"2002","unstructured":"Corbetta, M., Shulman, G.L.: Control of goal-directed and stimulus-driven attention in the brain. Nat. Rev. Neurosci. 3(3), 201\u2013215 (2002)","journal-title":"Nat. Rev. Neurosci."},{"issue":"5820","key":"1230_CR26","doi-asserted-by":"crossref","first-page":"1860","DOI":"10.1126\/science.1138071","volume":"315","author":"TJ Buschman","year":"2007","unstructured":"Buschman, T.J., Miller, E.K.: Top-down versus bottom-up control of attention in the prefrontal and posterior parietal cortices. Science 315(5820), 1860\u20131862 (2007)","journal-title":"Science"},{"key":"1230_CR27","first-page":"6077","volume-title":"Bottom-up and top-down attention for image captioning and visual question answering","author":"P Anderson","year":"2018","unstructured":"Anderson, P., He, X., Buehler, C., Teney, D., Johnson, M., Gould, S., Zhang, L.: Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6077\u20136086. IEEE (2018)"},{"key":"1230_CR28","first-page":"684","volume-title":"Exploring visual relationship for image captioning","author":"T Yao","year":"2018","unstructured":"Yao, T., Pan, Y., Li, Y., Mei, T.: Exploring visual relationship for image captioning. In: European Conference on Computer Vision, pp. 684\u2013699. Springer (2018)"},{"key":"1230_CR29","first-page":"10685","volume-title":"Auto-encoding scene graphs for image captioning","author":"X Yang","year":"2019","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: Auto-encoding scene graphs for image captioning, In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 10685\u201310694. 
IEEE (2019)"},{"key":"1230_CR30","volume-title":"A.v.d.: Memorizing normality to detect anomaly: Memory-augmented deep autoencoder for unsupervised anomaly detection","author":"D Gong","year":"2019","unstructured":"Gong, D., Liu, L., Le, V., Saha, B., Mansour, M.R., Venkatesh, S., Hengel, A.v.d.: Memorizing normality to detect anomaly: Memory-augmented deep autoencoder for unsupervised anomaly detection. In: IEEE International Conference on Computer Vision (ICCV), IEEE (2019)"},{"key":"1230_CR31","first-page":"7008","volume-title":"Self-critical sequence training for image captioning","author":"SJ Rennie","year":"2017","unstructured":"Rennie, S.J., Marcheret, E., Mroueh, Y., Ross, J., Goel, V.: Self-critical sequence training for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7008\u20137024. IEEE (2017)"},{"key":"1230_CR32","first-page":"740","volume-title":"Microsoft coco: common objects in context","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L.: Microsoft coco: common objects in context. In: European Conference on Computer Vision, pp. 740\u2013755. Springer (2014)"},{"key":"1230_CR33","first-page":"3128","volume-title":"Deep visual-semantic alignments for generating image descriptions","author":"A Karpathy","year":"2015","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137. IEEE (2015)"},{"key":"1230_CR34","first-page":"311","volume-title":"Bleu: a method for automatic evaluation of machine translation","author":"K Papineni","year":"2002","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.-J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318. Association for Computational Linguistics (2002)"},{"key":"1230_CR35","first-page":"65","volume-title":"Meteor: an automatic metric for MT evaluation with improved correlation with human judgments","author":"S Banerjee","year":"2005","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372. Association for Computational Linguistics (2005)"},{"key":"1230_CR36","first-page":"74","volume-title":"Rouge: a package for automatic evaluation of summaries","author":"C-Y Lin","year":"2004","unstructured":"Lin, C.-Y.: Rouge: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381. Association for Computational Linguistics (2004)"},{"key":"1230_CR37","first-page":"4566","volume-title":"Cider: consensus-based image description evaluation","author":"R Vedantam","year":"2015","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: Cider: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575. 
IEEE (2015)"},{"key":"1230_CR38","first-page":"382","volume-title":"S.: Spice: semantic propositional image caption evaluation","author":"P Anderson","year":"2016","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: Spice: semantic propositional image caption evaluation. In: European Conference on Computer Vision, pp. 382\u2013398. Springer (2016)"},{"key":"1230_CR39","first-page":"1","volume":"5","author":"P Zeng","year":"2022","unstructured":"Zeng, P., Zhang, H., Song, J., Gao, L.: S2 transformer for image captioning. In: Proceedings of the International Joint Conferences on Artificial Intelligence, 5, 1\u20137 (2022)","journal-title":"S2 transformer for image captioning."},{"key":"1230_CR40","volume-title":"Faster R-CNN: towards real-time object detection with region proposal networks","author":"S Ren","year":"2015","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems, vol. 28. Curran Associates Inc. (2015)"},{"key":"1230_CR41","first-page":"499","volume-title":"Recurrent fusion network for image captioning","author":"W Jiang","year":"2018","unstructured":"Jiang, W., Ma, L., Jiang, Y.-G., Liu, W., Zhang, T.: Recurrent fusion network for image captioning. In: European Conference on Computer Vision, pp. 499\u2013515. Springer (2018)"},{"key":"1230_CR42","first-page":"8928","volume-title":"Entangled transformer for image captioning","author":"G Li","year":"2019","unstructured":"Li, G., Zhu, L., Liu, P., Yang, Y.: Entangled transformer for image captioning.  In: Proceedings of the IEEE International Conference on Computer Vision, pp. 8928\u20138937. IEEE (2019)"},{"key":"1230_CR43","doi-asserted-by":"crossref","unstructured":"Zhao, S., Li, L., Peng, H.: Incorporating retrieval-based method for feature enhanced image captioning. Applied Intelligence, vol. 53, pp. 9731\u20139743. Springer (2023)","DOI":"10.1007\/s10489-022-04010-4"},{"key":"1230_CR44","unstructured":"Shen, S., Li, L.H., Tan, H., Bansal, M., Rohrbach, A., Chang, K.-W., Yao, Z., Keutzer, K.: How much can clip benefit vision and-language tasks? 3 (2021). arXiv preprint. arXiv:2107.06383"},{"key":"1230_CR45","first-page":"17969","volume-title":"Beyond a pre-trained object detector: Cross-modal textual and visual context for image captioning","author":"C-W Kuo","year":"2022","unstructured":"Kuo, C.-W., Kira, Z.: Beyond a pre-trained object detector: Cross-modal textual and visual context for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 17969\u201317979. IEEE (2022)"},{"key":"1230_CR46","first-page":"13041","volume":"34","author":"L Zhou","year":"2020","unstructured":"Zhou, L., Palangi, H., Zhang, L., Hu, H., Corso, J., Gao, J.: Unified vision-language pre-training for image captioning and VQA. In: Proceedings of the AAAI Conference on Artificial Intelligence. 34, 13041\u201313049. AAAI Press (2020)","journal-title":"Unified vision-language pre-training for image captioning and VQA."},{"key":"1230_CR47","first-page":"121","volume-title":"Oscar: object-semantics aligned pre-training for vision-language tasks","author":"X Li","year":"2020","unstructured":"Li, X., Yin, X., Li, C., Zhang, P., Hu, X., Zhang, L., Wang, L., Hu, H., Dong, L., Wei, F., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. 
In: Computer Vision\u2014ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXX 16, pp. 121\u2013137. Springer, Berlin (2020)"},{"key":"1230_CR48","first-page":"12888","volume-title":"Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"J Li","year":"2022","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900, PMLR (2022)"},{"key":"1230_CR49","first-page":"23318","volume-title":"OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","author":"P Wang","year":"2022","unstructured":"Wang, P., Yang, A., Men, R., Lin, J., Bai, S., Li, Z., Ma, J., Zhou, C., Zhou, J., Yang, H.: OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning, pp. 23318\u201323340, PMLR (2022)"},{"key":"1230_CR50","unstructured":"Wang, Z., Yu, J., Yu, A.W., Dai, Z., Tsvetkov, Y., Cao, Y.: SimVLM: simple visual language model pretraining with weak supervision (2021). arXiv preprint. arXiv:2108.10904"},{"key":"1230_CR51","unstructured":"Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01230-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-023-01230-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-023-01230-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,14]],"date-time":"2024-02-14T06:23:35Z","timestamp":1707891815000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-023-01230-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,29]]},"references-count":51,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024,2]]}},"alternative-id":["1230"],"URL":"https:\/\/doi.org\/10.1007\/s00530-023-01230-7","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1,29]]},"assertion":[{"value":"1 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 December 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 January 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this 
article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Written informed consent for publication of this paper was obtained from the Fujian University of Technology and all authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}}],"article-number":"48"}}