{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,25]],"date-time":"2025-12-25T07:25:36Z","timestamp":1766647536599,"version":"3.37.3"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T00:00:00Z","timestamp":1731715200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T00:00:00Z","timestamp":1731715200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Humanities and Social Sciences Fund of the Ministry of Education","award":["21YJC740082"],"award-info":[{"award-number":["21YJC740082"]}]},{"name":"Project of the 14th Five Year Plan of Education Studies, Fujian Province","award":["FJJKBK21-197"],"award-info":[{"award-number":["FJJKBK21-197"]}]},{"name":"Open Project of Fujian Provincial Key Laboratory of Cognitive Computing and Intelligent Information Processing at Wuyi University","award":["KLCCIIP2021203"],"award-info":[{"award-number":["KLCCIIP2021203"]}]},{"name":"Fujian Provincial Central Leading Local Science and Technology Development Fund Project","award":["2023L3029"],"award-info":[{"award-number":["2023L3029"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62471207"],"award-info":[{"award-number":["62471207"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"High-level Talent Research Start-up Fund Project of Fujian University of Traditional Chinese Medicine","award":["No. X2020005- Talent"],"award-info":[{"award-number":["No. 
X2020005- Talent"]}]},{"name":"Open Project of Fujian Key Laboratory of Medical Big Data Engineering","award":["KLKF202301"],"award-info":[{"award-number":["KLKF202301"]}]},{"DOI":"10.13039\/501100003392","name":"Natural Science Foundation of Fujian Province","doi-asserted-by":"crossref","award":["2024J02029,2023R1050,2023J011390"],"award-info":[{"award-number":["2024J02029,2023R1050,2023J011390"]}],"id":[{"id":"10.13039\/501100003392","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s00530-024-01559-7","type":"journal-article","created":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T06:58:10Z","timestamp":1731740290000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Bidirectional interactive alignment network for image captioning"],"prefix":"10.1007","volume":"30","author":[{"given":"Xinrong","family":"Cao","sequence":"first","affiliation":[]},{"given":"Peixin","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Rong","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Zuoyong","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,16]]},"reference":[{"unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. Adv Neural Inf Process Syst (2014)","key":"1559_CR1"},{"doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: A neural image caption generator. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
3156\u20133164 (2015)","key":"1559_CR2","DOI":"10.1109\/CVPR.2015.7298935"},{"unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Adv Neural Inf Process Syst (2017)","key":"1559_CR3"},{"doi-asserted-by":"crossref","unstructured":"Cornia, M., Stefanini, M., Baraldi, L., Cucchiara, R.: Meshed-memory transformer for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10578\u201310587 (2020)","key":"1559_CR4","DOI":"10.1109\/CVPR42600.2020.01059"},{"doi-asserted-by":"crossref","unstructured":"Pan, Y., Yao, T., Li, Y., Mei, T.: X-linear attention networks for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10971\u201310980 (2020)","key":"1559_CR5","DOI":"10.1109\/CVPR42600.2020.01098"},{"doi-asserted-by":"crossref","unstructured":"Zhang, X., Sun, X., Luo, Y., Ji, J., Zhou, Y., Wu, Y., Huang, F., Ji, R.: Rstnet: Captioning with adaptive attention on visual and non-visual words. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15465\u201315474 (2021)","key":"1559_CR6","DOI":"10.1109\/CVPR46437.2021.01521"},{"unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems (2015)","key":"1559_CR7"},{"doi-asserted-by":"crossref","unstructured":"Jiang, H., Misra, I., Rohrbach, M., Learned-Miller, E., Chen, X.: In defense of grid features for visual question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
10267\u201310276 (2020)","key":"1559_CR8","DOI":"10.1109\/CVPR42600.2020.01028"},{"doi-asserted-by":"crossref","unstructured":"Luo, Y., Ji, J., Sun, X., Cao, L., Wu, Y., Huang, F., Lin, C.-W., Ji, R.: Dual-level collaborative transformer for image captioning. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp. 2286\u20132293 (2021)","key":"1559_CR9","DOI":"10.1609\/aaai.v35i3.16328"},{"doi-asserted-by":"crossref","unstructured":"Nguyen, V.-Q., Suganuma, M., Okatani, T.: Grit: Faster and better image captioning transformer using dual visual features. In: European Conference on Computer Vision, pp. 167\u2013184 (2022)","key":"1559_CR10","DOI":"10.1007\/978-3-031-20059-5_10"},{"doi-asserted-by":"crossref","unstructured":"Wu, M., Zhang, X., Sun, X., Zhou, Y., Chen, C., Gu, J., Sun, X., Ji, R.: Difnet: Boosting visual information flow for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18020\u201318029 (2022)","key":"1559_CR11","DOI":"10.1109\/CVPR52688.2022.01749"},{"doi-asserted-by":"crossref","unstructured":"Anderson, P., He, X., Buehler, C., Teney, D., Johnson, M., Gould, S., Zhang, L.: Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6077\u20136086 (2018)","key":"1559_CR12","DOI":"10.1109\/CVPR.2018.00636"},{"doi-asserted-by":"crossref","unstructured":"Ji, J., Luo, Y., Sun, X., Chen, F., Luo, G., Wu, Y., Gao, Y., Ji, R.: Improving image captioning by leveraging intra-and inter-layer global representation in transformer network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 
1655\u20131663 (2021)","key":"1559_CR13","DOI":"10.1609\/aaai.v35i2.16258"},{"key":"1559_CR14","doi-asserted-by":"publisher","first-page":"48","DOI":"10.1016\/j.neucom.2021.10.014","volume":"468","author":"Y Wang","year":"2022","unstructured":"Wang, Y., Xu, J., Sun, Y.: A visual persistence model for image captioning. Neurocomputing 468, 48\u201359 (2022)","journal-title":"Neurocomputing"},{"issue":"8","key":"1559_CR15","doi-asserted-by":"publisher","first-page":"9731","DOI":"10.1007\/s10489-022-04010-4","volume":"53","author":"S Zhao","year":"2023","unstructured":"Zhao, S., Li, L., Peng, H.: Incorporating retrieval-based method for feature enhanced image captioning. Appl. Intell. 53(8), 9731\u20139743 (2023)","journal-title":"Appl. Intell."},{"key":"1559_CR16","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1016\/j.neucom.2022.11.045","volume":"519","author":"N Hu","year":"2023","unstructured":"Hu, N., Fan, C., Ming, Y., Feng, F.: Maenet: a novel multi-head association attention enhancement network for completing intra-modal interaction in image captioning. Neurocomputing 519, 69\u201381 (2023)","journal-title":"Neurocomputing"},{"doi-asserted-by":"crossref","unstructured":"Zhang, P., Li, X., Hu, X., Yang, J., Zhang, L., Wang, L., Choi, Y., Gao, J.: Vinvl: Revisiting visual representations in vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5579\u20135588 (2021)","key":"1559_CR17","DOI":"10.1109\/CVPR46437.2021.00553"},{"doi-asserted-by":"crossref","unstructured":"Rennie, S.J., Marcheret, E., Mroueh, Y., Ross, J., Goel, V.: Self-critical sequence training for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7008\u20137024 (2017)","key":"1559_CR18","DOI":"10.1109\/CVPR.2017.131"},{"doi-asserted-by":"crossref","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. 
Neural Comput, 1735\u20131780 (1997)","key":"1559_CR19","DOI":"10.1162\/neco.1997.9.8.1735"},{"unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., Bengio, Y.: Show, attend and tell: neural image caption generation with visual attention. In: International conference on machine learning, pp. 2048\u20132057 (2015)","key":"1559_CR20"},{"doi-asserted-by":"crossref","unstructured":"Lu, J., Xiong, C., Parikh, D., Socher, R.: Knowing when to look: Adaptive attention via a visual sentinel for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 375\u2013383 (2017)","key":"1559_CR21","DOI":"10.1109\/CVPR.2017.345"},{"doi-asserted-by":"crossref","unstructured":"Corbetta, M., Shulman, G.L.: Control of goal-directed and stimulus-driven attention in the brain. Nat Rev Neurosci, 201\u2013215 (2002)","key":"1559_CR22","DOI":"10.1038\/nrn755"},{"doi-asserted-by":"crossref","unstructured":"Buschman, T.J., Miller, E.K.: Top-down versus bottom-up control of attention in the prefrontal and posterior parietal cortices. Science, 1860\u20131862 (2007)","key":"1559_CR23","DOI":"10.1126\/science.1138071"},{"doi-asserted-by":"crossref","unstructured":"Huang, L., Wang, W., Chen, J., Wei, X.-Y.: Attention on attention for image captioning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4634\u20134643 (2019)","key":"1559_CR24","DOI":"10.1109\/ICCV.2019.00473"},{"doi-asserted-by":"crossref","unstructured":"Fei, Z.: Memory-augmented image captioning. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp. 1317\u20131324 (2021)","key":"1559_CR25","DOI":"10.1609\/aaai.v35i2.16220"},{"doi-asserted-by":"crossref","unstructured":"Zeng, P., Zhang, H., Song, J., Gao, L.: S2 transformer for image captioning. 
In: Proceedings of the International Joint Conferences on Artificial Intelligence (2022)","key":"1559_CR26","DOI":"10.24963\/ijcai.2022\/224"},{"key":"1559_CR27","first-page":"1","volume":"19","author":"H Zhou","year":"2022","unstructured":"Zhou, H., Tian, C., Zhang, Z., Huo, Q., Xie, Y., Li, Z.: Multispectral fusion transformer network for rgb-thermal urban scene semantic segmentation. IEEE Geosci. Remote Sens. Lett. 19, 1\u20135 (2022)","journal-title":"IEEE Geosci. Remote Sens. Lett."},{"key":"1559_CR28","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.110043","volume":"146","author":"H Zhou","year":"2024","unstructured":"Zhou, H., Tian, C., Zhang, Z., Li, C., Xie, Y., Li, Z.: Frequency-aware feature aggregation network with dual-task consistency for rgb-t salient object detection. Pattern Recogn. 146, 110043 (2024)","journal-title":"Pattern Recogn."},{"doi-asserted-by":"crossref","unstructured":"Kuo, C.-W., Kira, Z.: Beyond a pre-trained object detector: Cross-modal textual and visual context for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 17969\u201317979 (2022)","key":"1559_CR29","DOI":"10.1109\/CVPR52688.2022.01744"},{"doi-asserted-by":"crossref","unstructured":"Li, X., Yin, X., Li, C., Zhang, P., Hu, X., Zhang, L., Wang, L., Hu, H., Dong, L., Wei, F., et al.: Oscar: Object-semantics aligned pre-training for vision-language tasks. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXX 16, pp. 121\u2013137 (2020)","key":"1559_CR30","DOI":"10.1007\/978-3-030-58577-8_8"},{"doi-asserted-by":"crossref","unstructured":"Kendall, A., Gal, Y., Cipolla, R.: Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
7482\u20137491 (2018)","key":"1559_CR31","DOI":"10.1109\/CVPR.2018.00781"},{"doi-asserted-by":"crossref","unstructured":"Kongyoung, S., Macdonald, C., Ounis, I.: Multi-task learning using dynamic task weighting for conversational question answering. In: Proceedings of the 5th International Workshop on Search-Oriented Conversational AI (SCAI), pp. 17\u201326 (2020)","key":"1559_CR32","DOI":"10.18653\/v1\/2020.scai-1.3"},{"doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137 (2015)","key":"1559_CR33","DOI":"10.1109\/CVPR.2015.7298932"},{"doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.-J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","key":"1559_CR34","DOI":"10.3115\/1073083.1073135"},{"unstructured":"Banerjee, S., Lavie, A.: Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In: Proceedings of the Acl Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation And\/or Summarization, pp. 65\u201372 (2005)","key":"1559_CR35"},{"unstructured":"Lin, C.-Y.: Rouge: A package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381 (2004)","key":"1559_CR36"},{"doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: Cider: Consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","key":"1559_CR37","DOI":"10.1109\/CVPR.2015.7299087"},{"doi-asserted-by":"crossref","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: Spice: Semantic propositional image caption evaluation. 
In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14, pp. 382\u2013398 (2016)","key":"1559_CR38","DOI":"10.1007\/978-3-319-46454-1_24"},{"issue":"3","key":"1559_CR39","doi-asserted-by":"publisher","first-page":"2706","DOI":"10.1007\/s10489-022-03624-y","volume":"53","author":"J Wei","year":"2023","unstructured":"Wei, J., Li, Z., Zhu, J., Ma, H.: Enhance understanding and reasoning ability for image captioning. Appl. Intell. 53(3), 2706\u20132722 (2023)","journal-title":"Appl. Intell."},{"doi-asserted-by":"crossref","unstructured":"Yan, J., Xie, Y., Guo, Y., Wei, Y., Luan, X.: Exploring better image captioning with grid features. Complex & Intelligent Systems, 1\u201316 (2024)","key":"1559_CR40","DOI":"10.1007\/s40747-023-01341-8"},{"unstructured":"Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763 (2021)","key":"1559_CR41"},{"unstructured":"Shen, S., Li, L.H., Tan, H., Bansal, M., Rohrbach, A., Chang, K.-W., Yao, Z., Keutzer, K.: How much can clip benefit visionand-language tasks? arXiv preprint arXiv:2107.06383 (2021)","key":"1559_CR42"},{"doi-asserted-by":"crossref","unstructured":"Barraco, M., Cornia, M., Cascianelli, S., Baraldi, L., Cucchiara, R.: The unreasonable effectiveness of clip features for image captioning: an experimental analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4662\u20134670 (2022)","key":"1559_CR43","DOI":"10.1109\/CVPRW56347.2022.00512"},{"doi-asserted-by":"crossref","unstructured":"Sarto, S., Cornia, M., Baraldi, L., Cucchiara, R.: Retrieval-augmented transformer for image captioning. In: Proceedings of the 19th International Conference on Content-based Multimedia Indexing, pp. 
1\u20137 (2022)","key":"1559_CR44","DOI":"10.1145\/3549555.3549585"},{"doi-asserted-by":"crossref","unstructured":"Barraco, M., Sarto, S., Cornia, M., Baraldi, L., Cucchiara, R.: With a little help from your own past: Prototypical memory networks for image captioning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3021\u20133031 (2023)","key":"1559_CR45","DOI":"10.1109\/ICCV51070.2023.00282"},{"doi-asserted-by":"crossref","unstructured":"Zhang, J., Xie, Y., Ding, W., Wang, Z.: Cross on cross attention: Deep fusion transformer for image captioning. IEEE Trans Circuits Syst Video Technol (2023)","key":"1559_CR46","DOI":"10.1109\/TCSVT.2023.3243725"},{"doi-asserted-by":"crossref","unstructured":"Li, Y., Pan, Y., Yao, T., Mei, T.: Comprehending and ordering semantics for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 17990\u201317999 (2022)","key":"1559_CR47","DOI":"10.1109\/CVPR52688.2022.01746"},{"doi-asserted-by":"crossref","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: European Conference on Computer Vision, pp. 213\u2013229 (2020). Springer","key":"1559_CR48","DOI":"10.1007\/978-3-030-58452-8_13"},{"doi-asserted-by":"crossref","unstructured":"Fang, Z., Wang, J., Hu, X., Liang, L., Gan, Z., Wang, L., Yang, Y., Liu, Z.: Injecting semantic concepts into end-to-end image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18009\u201318019 (2022)","key":"1559_CR49","DOI":"10.1109\/CVPR52688.2022.01748"},{"unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 
12888\u201312900 (2022)","key":"1559_CR50"},{"unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In: International Conference on Machine Learning, pp. 19730\u201319742 (2023). PMLR","key":"1559_CR51"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01559-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01559-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01559-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,16]],"date-time":"2024-12-16T09:13:17Z","timestamp":1734340397000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01559-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,16]]},"references-count":51,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["1559"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01559-7","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"type":"print","value":"0942-4962"},{"type":"electronic","value":"1432-1882"}],"subject":[],"published":{"date-parts":[[2024,11,16]]},"assertion":[{"value":"9 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 November 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 November 2024","order":3,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no Conflict of interest to declare that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Written informed consent for publication of this paper was obtained from the Fujian University of Technology and all authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Peixin Yan: Conceptualization, Methodology, Validation, Writing-original draft; Zuoyong Li: Supervision, Writing-review & editing, Funding acquisition; Rong Hu: Conceptualization, Investigation, Supervision; Xinrong Cao: Conceptualization, Supervision, Writing-original draft, review & editing, Funding acquisition.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Credit author statement"}}],"article-number":"340"}}