{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T22:43:21Z","timestamp":1767998601916,"version":"3.49.0"},"reference-count":57,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2024,4,20]],"date-time":"2024-04-20T00:00:00Z","timestamp":1713571200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,4,20]],"date-time":"2024-04-20T00:00:00Z","timestamp":1713571200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62062041"],"award-info":[{"award-number":["62062041"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Scientific Research Foundation of Education Bureau of Jiangxi Province","award":["GJJ211009"],"award-info":[{"award-number":["GJJ211009"]}]},{"DOI":"10.13039\/501100004479","name":"Jiangxi Provincial Natural Science Foundation","doi-asserted-by":"crossref","award":["20212BAB202020"],"award-info":[{"award-number":["20212BAB202020"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100004479","name":"Jiangxi Provincial Natural Science Foundation","doi-asserted-by":"crossref","award":["62362041"],"award-info":[{"award-number":["62362041"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Ambient Intell Human Comput"],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1007\/s12652-024-04779-x","type":"journal-article","created":{"date-parts":[[2024,4,21]],"date-time":"2024-04-21T02:37:58Z","timestamp":1713667078000},"page":"2839-2852","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Video emotional description with fact reinforcement and emotion awaking"],"prefix":"10.1007","volume":"15","author":[{"given":"Pengjie","family":"Tang","sequence":"first","affiliation":[]},{"given":"Hong","family":"Rao","sequence":"additional","affiliation":[]},{"given":"Ai","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yunlan","family":"Tan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,20]]},"reference":[{"key":"4779_CR1","unstructured":"Banerjee S, Lavie A (2005) Meteor: an automatic metric for mt evaluation with improved correlation with human judgments. In: Annual Meeting of the Association for Computational Linguistics Workshop, pp 65\u201372"},{"issue":"8","key":"4779_CR2","doi-asserted-by":"publisher","first-page":"1617","DOI":"10.1109\/TPAMI.2016.2608901","volume":"39","author":"X Chang","year":"2017","unstructured":"Chang X, Yu Y, Yang Y et al (2017) Semantic pooling for complex event analysis in untrimmed videos. 
IEEE Trans Pattern Anal Mach Intell 39(8):1617\u20131632","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"1","key":"4779_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TPAMI.2021.3137605","volume":"45","author":"X Chang","year":"2023","unstructured":"Chang X, Ren P, Xu P et al (2023) A comprehensive survey of scene graphs: generation and application. IEEE Trans Pattern Anal Mach Intell 45(1):1\u201326","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4779_CR4","doi-asserted-by":"crossref","unstructured":"Chen S, Jiang Y (2019) Motion guided spatial attention for video captioning. In: AAAI Conference on artificial intelligence, pp 8191\u20138198","DOI":"10.1609\/aaai.v33i01.33018191"},{"key":"4779_CR5","doi-asserted-by":"crossref","unstructured":"Chen T, Zhang Z, You Q, et\u00a0al (2018) \u201cfactual\u201d or \u201cemotional\u201d: stylized image captioning with adaptive learning and attention. In: European Conference on computer vision, pp 527\u2013543","DOI":"10.1007\/978-3-030-01249-6_32"},{"key":"4779_CR6","doi-asserted-by":"crossref","unstructured":"Chollet F (2017) Xception: deep learning with depthwise separable convolutions. In: IEEE Conference on computer vision and pattern recognition, pp 1251\u20131258","DOI":"10.1109\/CVPR.2017.195"},{"key":"4779_CR7","doi-asserted-by":"crossref","unstructured":"Deb T, Sadmanee A, Bhaumik K et\u00a0al (2022) Variational stacked local attention networks for diverse video captioning. In: IEEE Winter Conference on applications of computer vision, pp 4070\u20134079","DOI":"10.1109\/WACV51458.2022.00255"},{"key":"4779_CR8","doi-asserted-by":"crossref","unstructured":"Fan S, Shen Z, Jiang M et\u00a0al (2018) Emotional attention: a study of image sentiment and visual attention. In: IEEE Conference on computer vision and pattern recognition, pp 7521\u20137531","DOI":"10.1109\/CVPR.2018.00785"},{"key":"4779_CR9","doi-asserted-by":"crossref","unstructured":"Fu T, Li L, Gan Z et\u00a0al (2023) An empirical study of end-to-end video-language transformers with masked visual modeling. In: IEEE Conference on computer vision and pattern recognition, pp 22898\u201322909","DOI":"10.1109\/CVPR52729.2023.02193"},{"key":"4779_CR10","doi-asserted-by":"crossref","unstructured":"Gan C, Gan Z, He X et\u00a0al (2017) Stylenet: generating attractive visual captions with styles. In: IEEE Conference on computer vision and pattern recognition, pp 955\u2013964","DOI":"10.1109\/CVPR.2017.108"},{"issue":"9","key":"4779_CR11","doi-asserted-by":"publisher","first-page":"2045","DOI":"10.1109\/TMM.2017.2729019","volume":"19","author":"L Gao","year":"2017","unstructured":"Gao L, Guo Z, Zhang H et al (2017) Video captioning with attention-based lstm and semantic consistency. IEEE Trans Multimed 19(9):2045\u20132055","journal-title":"IEEE Trans Multimed"},{"key":"4779_CR12","doi-asserted-by":"crossref","unstructured":"Guadarrama S, Krishnamoorthy N, Malkarnenkar G et\u00a0al (2013) Youtube2text: recognizing and describing arbitrary activities using semantic hierarchies and zero-shot recognition. In: IEEE International Conference on computer vision, pp 2712\u20132719","DOI":"10.1109\/ICCV.2013.337"},{"key":"4779_CR13","doi-asserted-by":"crossref","unstructured":"Gupta A, Srinivasan P, Shi J et\u00a0al (2009) Understanding videos, constructing plots: learning a visually grounded storyline model from annotated videos. 
In: IEEE Conference on computer vision and pattern recognition, pp 2012\u20132019","DOI":"10.1109\/CVPRW.2009.5206492"},{"key":"4779_CR14","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S et\u00a0al (2016) Deep residual learning for image recognition. In: IEEE Conference on computer vision and pattern recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"issue":"5786","key":"4779_CR15","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1126\/science.1127647","volume":"313","author":"G Hinton","year":"2006","unstructured":"Hinton G, Salakhutdinov R (2006) Reducing the dimensionality of data with neural networks. Science 313(5786):504\u2013507","journal-title":"Science"},{"key":"4779_CR16","doi-asserted-by":"crossref","unstructured":"Jiang Y, Xu B, Xue X (2014) Predicting emotions in user-generated videos. In: AAAI Conference on artificial intelligence, pp 73\u201379","DOI":"10.1609\/aaai.v28i1.8724"},{"key":"4779_CR17","doi-asserted-by":"crossref","unstructured":"Jia Y, Shelhamer E, Donahue J et\u00a0al (2014) Caffe: convolutional architecture for fast feature embedding. In: ACM Conference on multimedia, pp 675\u2013678","DOI":"10.1145\/2647868.2654889"},{"key":"4779_CR18","doi-asserted-by":"crossref","unstructured":"Karayil T, Irfan A, Raue F et\u00a0al (2019) Conditional gans for image captioning with sentiments. In: International Conference on artificial neural networks, pp 300\u2013312","DOI":"10.1007\/978-3-030-30490-4_25"},{"issue":"2","key":"4779_CR19","doi-asserted-by":"publisher","first-page":"171","DOI":"10.1023\/A:1020346032608","volume":"50","author":"A Kojima","year":"2002","unstructured":"Kojima A, Tamura T, Fukunaga K (2002) Natural language description of human activities from video images based on concept hierarchy of actions. Int J Comput Vis 50(2):171\u2013184","journal-title":"Int J Comput Vis"},{"key":"4779_CR20","doi-asserted-by":"crossref","unstructured":"Li L, Gong B (2019) End-to-end video captioning with multitask reinforcement learning. In: IEEE Winter Conference on applications of computer vision, pp 339\u2013348","DOI":"10.1109\/WACV.2019.00042"},{"issue":"4","key":"4779_CR21","doi-asserted-by":"crossref","first-page":"4430","DOI":"10.1109\/TPAMI.2022.3194044","volume":"45","author":"C Li","year":"2023","unstructured":"Li C, Wang G, Wang B et al (2023a) Ds-net++: dynamic weight slicing for efficient inference in cnns and vision transformers. IEEE Trans Pattern Anal Mach Intell 45(4):4430\u20134446","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"3","key":"4779_CR22","first-page":"3918","volume":"45","author":"M Li","year":"2023","unstructured":"Li M, Huang P, Chang X et al (2023b) Video pivoting unsupervised multi-modal machine translation. IEEE Trans Pattern Anal Mach Intell 45(3):3918\u20133932","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4779_CR23","doi-asserted-by":"crossref","unstructured":"Lin C, Och F (2004) Automatic evaluation of machine translation quality using longest common subsequence and skip-bigram statistics. In: Annual Meeting of the Association for Computational Linguistics, pp 21\u201326","DOI":"10.3115\/1218955.1219032"},{"key":"4779_CR24","doi-asserted-by":"crossref","unstructured":"Lin T, Maire M, Belongie S et\u00a0al (2014) Microsoft coco: common objects in context. 
In: European Conference on computer vision, pp 740\u2013755","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"4779_CR25","doi-asserted-by":"crossref","unstructured":"Lin K, Li L, Lin C et\u00a0al (2022) Swinbert: end-to-end transformers with sparse attention for video captioning. In: IEEE Conference on computer vision and pattern recognition, pp 17949\u201317958","DOI":"10.1109\/CVPR52688.2022.01742"},{"issue":"4","key":"4779_CR26","doi-asserted-by":"publisher","first-page":"1098","DOI":"10.1109\/TMM.2019.2936805","volume":"22","author":"T Liu","year":"2020","unstructured":"Liu T, Wan J, Dai X et al (2020) Sentiment recognition for short annotated gifs using visual-textual fusion. IEEE Trans Multimed 22(4):1098\u20131110","journal-title":"IEEE Trans Multimed"},{"issue":"9","key":"4779_CR27","doi-asserted-by":"publisher","first-page":"3259","DOI":"10.1109\/TPAMI.2019.2940007","volume":"43","author":"S Liu","year":"2021","unstructured":"Liu S, Ren Z, Yuan J (2021) Sibnet: sibling convolutional encoder for video captioning. IEEE Trans Pattern Anal Mach Intell 43(9):3259\u20133272","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4779_CR28","doi-asserted-by":"publisher","first-page":"3386","DOI":"10.1109\/TIP.2021.3139234","volume":"31","author":"G Luo","year":"2022","unstructured":"Luo G, Zhou Y, Sun X et al (2022) Towards lightweight transformer via group-wise transformation for vision-and-language tasks. IEEE Trans Image Process 31:3386\u20133398","journal-title":"IEEE Trans Image Process"},{"key":"4779_CR29","doi-asserted-by":"crossref","unstructured":"Mathews A, Xie L, He X (2016) Senticap: generating image descriptions with sentiments. In: AAAI Conference on artificial intelligence, pp 3574\u20133580","DOI":"10.1609\/aaai.v30i1.10475"},{"issue":"2","key":"4779_CR30","doi-asserted-by":"publisher","first-page":"189","DOI":"10.1007\/BF00849074","volume":"8","author":"H Nagel","year":"1994","unstructured":"Nagel H (1994) A vision of \u201cvision and language\u201d comprises action: an example from road traffic. Artif Intell Rev 8(2):189\u2013214","journal-title":"Artif Intell Rev"},{"key":"4779_CR31","doi-asserted-by":"crossref","unstructured":"Pan B, Cai H, Huang D et\u00a0al (2020) Spatio-temporal graph for video captioning with knowledge distillation. In: IEEE Conference on computer vision and pattern recognition, pp 10870\u201310879","DOI":"10.1109\/CVPR42600.2020.01088"},{"key":"4779_CR32","doi-asserted-by":"crossref","unstructured":"Papineni K, Roukos S, Ward T et\u00a0al (2002) Bleu: a method for automatic evaluation of machine translation. In: Annual Meeting of the Association for Computational Linguistics, pp 311\u2013318","DOI":"10.3115\/1073083.1073135"},{"key":"4779_CR33","doi-asserted-by":"crossref","unstructured":"Park J, Rohrbach M, Darrell T et\u00a0al (2019) Adversarial inference for multi-sentence video description. In: IEEE Conference on computer vision and pattern recognition, pp 6591\u20136601","DOI":"10.1109\/CVPR.2019.00676"},{"key":"4779_CR34","doi-asserted-by":"crossref","unstructured":"Pei W, Zhang J, Wang X et\u00a0al (2019) Memory-attended recurrent network for video captioning. 
In: IEEE Conference on computer vision and pattern recognition, pp 8347\u20138356","DOI":"10.1109\/CVPR.2019.00854"},{"key":"4779_CR35","doi-asserted-by":"publisher","first-page":"4165","DOI":"10.1007\/s10462-021-10104-1","volume":"55","author":"J Perez-Martin","year":"2022","unstructured":"Perez-Martin J, Bustos B, Guimaraes S et al (2022) A comprehensive review of the video-to-text problem. Artif Intell Rev 55:4165\u20134239","journal-title":"Artif Intell Rev"},{"issue":"6","key":"4779_CR36","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2017","unstructured":"Ren S, He K, Girshick R et al (2017) Faster r-cnn: towards real-time object detection with region proposal networks. IEEE Trans Pattern Anal Mach Intell 39(6):1137\u20131149","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"3","key":"4779_CR37","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky O, Deng J, Su H et al (2015) Imagenet large scale visual recognition challenge. Int J Comput Vis 115(3):211\u2013252","journal-title":"Int J Comput Vis"},{"key":"4779_CR38","doi-asserted-by":"crossref","unstructured":"Song J, Guo Z, Gao L et\u00a0al (2017) Hierarchical lstm with adjusted temporal attention for video captioning. In: International Joint Conference on artificial intelligence, pp 2737\u20132743","DOI":"10.24963\/ijcai.2017\/381"},{"key":"4779_CR39","doi-asserted-by":"publisher","first-page":"154","DOI":"10.1016\/j.neucom.2018.05.086","volume":"312","author":"P Tang","year":"2018","unstructured":"Tang P, Wang H, Kwong S (2018) Deep sequential fusion lstm network for image description. Neurocomputing 312:154\u2013164","journal-title":"Neurocomputing"},{"issue":"2","key":"4779_CR40","doi-asserted-by":"publisher","first-page":"311","DOI":"10.1145\/3303083","volume":"15","author":"P Tang","year":"2019","unstructured":"Tang P, Wang H, Li Q (2019) Rich visual and language representation with complementary semantics for video captioning. ACM Trans Multimed Comput Commun Appl 15(2):311\u2013323","journal-title":"ACM Trans Multimed Comput Commun Appl"},{"key":"4779_CR41","unstructured":"Vaswani A, Shazeer N, Parmar N et\u00a0al (2017) Attention is all you need. In: International Conference on neural information processing systems, pp 5998\u20136008"},{"key":"4779_CR42","doi-asserted-by":"crossref","unstructured":"Vedantam R, Zitnick C, Parikh D (2015) Cider: consensus-based image description evaluation. In: IEEE Conference on computer vision and pattern recognition, pp 4566\u20134575","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"4779_CR43","doi-asserted-by":"crossref","unstructured":"Venugopalan S, Rohrbach M, Donahue J et\u00a0al (2015) Sequence to sequence \u2013 video to text. In: IEEE International Conference on computer vision, pp 4534\u20134542","DOI":"10.1109\/ICCV.2015.515"},{"key":"4779_CR44","doi-asserted-by":"crossref","unstructured":"Wang J, Wang W, Huang Y, et\u00a0al (2018a) M3: multimodal memory modelling for video captioning. In: IEEE Conference on computer vision and pattern recognition, pp 7512\u20137520","DOI":"10.1109\/CVPR.2018.00784"},{"key":"4779_CR45","doi-asserted-by":"crossref","unstructured":"Wang X, Chen W, Wu J et\u00a0al (2018b) Video captioning via hierarchical reinforcement learning. 
In: IEEE Conference on computer vision and pattern recognition, pp 4213\u20134222","DOI":"10.1109\/CVPR.2018.00443"},{"key":"4779_CR46","doi-asserted-by":"crossref","unstructured":"Wang T, Zhang R, Lu Z et\u00a0al (2021) End-to-end dense video captioning with parallel decoding. In: IEEE International Conference on computer vision, pp 6847\u20136857","DOI":"10.1109\/ICCV48922.2021.00677"},{"key":"4779_CR47","doi-asserted-by":"publisher","first-page":"715","DOI":"10.1109\/TMM.2021.3058555","volume":"24","author":"H Wang","year":"2022","unstructured":"Wang H, Tang P, Li Q et al (2022) Emotion expression with fact transfer for video description. IEEE Trans Multimed 24:715\u2013727","journal-title":"IEEE Trans Multimed"},{"key":"4779_CR48","doi-asserted-by":"crossref","unstructured":"Xue F, Shi Z, Wei F et\u00a0al (2022) Go wider instead of deeper. In: AAAI Conference on artificial intelligence, pp 8779\u20138787","DOI":"10.1609\/aaai.v36i8.20858"},{"issue":"1","key":"4779_CR49","first-page":"41","volume":"12","author":"C Yan","year":"2020","unstructured":"Yan C, Chang X, Luo M et al (2020) Self-weighted robust lda for multiclass classification with edge classes. ACM Trans Intell Syst Technol 12(1):41\u2013419","journal-title":"ACM Trans Intell Syst Technol"},{"issue":"12","key":"4779_CR50","doi-asserted-by":"publisher","first-page":"9733","DOI":"10.1109\/TPAMI.2021.3127346","volume":"44","author":"C Yan","year":"2022","unstructured":"Yan C, Chang X, Li Z et al (2022) Zeronas: differentiable generative adversarial networks search for zero-shot learning. IEEE Trans Pattern Anal Mach Intell 44(12):9733\u20139740","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"11","key":"4779_CR51","doi-asserted-by":"publisher","first-page":"5600","DOI":"10.1109\/TIP.2018.2855422","volume":"27","author":"Y Yang","year":"2018","unstructured":"Yang Y, Zhou J, Ai J et al (2018) Video captioning by adversarial lstm. IEEE Trans Image Process 27(11):5600\u20135611","journal-title":"IEEE Trans Image Process"},{"key":"4779_CR52","doi-asserted-by":"crossref","unstructured":"You Q, Luo J, Jin H, et\u00a0al (2015) Robust image sentiment analysis using progressively trained and domain transferred deep networks. In: AAAI Conference on artificial intelligence, pp 381\u2013388","DOI":"10.1609\/aaai.v29i1.9179"},{"issue":"3","key":"4779_CR53","doi-asserted-by":"publisher","first-page":"701","DOI":"10.1145\/3486678","volume":"18","author":"D Yuan","year":"2022","unstructured":"Yuan D, Chang X, Li Z et al (2022) Learning adaptive spatial-temporal context-aware correlation filters for uav tracking. ACM Trans Multimed Comput Commun Appl 18(3):701\u2013718","journal-title":"ACM Trans Multimed Comput Commun Appl"},{"key":"4779_CR54","doi-asserted-by":"crossref","unstructured":"Zhang Z, Shi Y, Yuan C et\u00a0al (2020) Object relational graph with teacher-recommended learning for video captioning. In: IEEE Conference on computer vision and pattern recognition, pp 13275\u201313285","DOI":"10.1109\/CVPR42600.2020.01329"},{"issue":"3","key":"4779_CR55","doi-asserted-by":"crossref","first-page":"3848","DOI":"10.1109\/TPAMI.2022.3181579","volume":"45","author":"L Zhang","year":"2023","unstructured":"Zhang L, Chang X, Liu J et al (2023) Tn-zstad: transferable network for zero-shot temporal activity detection. IEEE Trans Pattern Anal Mach Intell 45(3):3848\u20133861","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"4779_CR56","doi-asserted-by":"crossref","unstructured":"Zhao Z, Lu H, Cai D, et\u00a0al. 
(2017) Microblog sentiment classification via recurrent random walk network learning. In: International Joint Conference on Artificial Intelligence, pp 3532\u20133538","DOI":"10.24963\/ijcai.2017\/494"},{"issue":"11","key":"4779_CR57","doi-asserted-by":"publisher","first-page":"5552","DOI":"10.1109\/TIP.2019.2916757","volume":"28","author":"B Zhao","year":"2019","unstructured":"Zhao B, Li X, Lu X (2019) Cam-rnn: co-attention model based rnn for video captioning. IEEE Trans Image Process 28(11):5552\u20135565","journal-title":"IEEE Trans Image Process"}],"container-title":["Journal of Ambient Intelligence and Humanized Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12652-024-04779-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s12652-024-04779-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12652-024-04779-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T18:32:49Z","timestamp":1731781969000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s12652-024-04779-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,20]]},"references-count":57,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2024,6]]}},"alternative-id":["4779"],"URL":"https:\/\/doi.org\/10.1007\/s12652-024-04779-x","relation":{},"ISSN":["1868-5137","1868-5145"],"issn-type":[{"value":"1868-5137","type":"print"},{"value":"1868-5145","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,20]]},"assertion":[{"value":"11 April 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 February 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 April 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors declare that they have no conflict of interest with other people or organizations.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article does not contain any study with human participants performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Informed consent was obtained from all individual participants included in the study.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed Consent"}}]}}
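The record above is a standard Crossref REST API "work" response. For readers who want to reproduce or reuse it, here is a minimal sketch of fetching the same record from the public endpoint https://api.crossref.org/works/{DOI} and reading its citation fields; the DOI and all field names (message.title, message.author, etc.) come directly from the JSON shown above, and the script assumes only network access and the Python standard library.

```python
import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/s12652-024-04779-x"

# Public Crossref REST API: GET /works/{doi} returns
# {"status": "ok", "message-type": "work", ..., "message": {...}}
url = f"https://api.crossref.org/works/{DOI}"
with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

work = record["message"]

# "title" and "container-title" are arrays in the Crossref schema,
# even when they hold a single value.
title = work["title"][0]
journal = work["container-title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])

print(f"{authors}. {title}. {journal}, "
      f"vol. {work['volume']}, no. {work['issue']}, pp. {work['page']}.")
print(f"References deposited: {work['references-count']}")
```

Run as-is, this should print the citation line for "Video emotional description with fact reinforcement and emotion awaking" along with its 57 deposited references; polite use of the API (e.g., a mailto query parameter or User-Agent header) is recommended by Crossref but not required for a one-off lookup.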