{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,1,18]],"date-time":"2025-01-18T11:10:02Z","timestamp":1737198602325,"version":"3.33.0"},"reference-count":53,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,12,21]],"date-time":"2024-12-21T00:00:00Z","timestamp":1734739200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,21]],"date-time":"2024-12-21T00:00:00Z","timestamp":1734739200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Heilongjiang Province Higher Education Teaching Reform Project","award":["SJGY 20200320"],"award-info":[{"award-number":["SJGY 20200320"]}]},{"name":"Key Research and Development Project of Heilongjiang Province","award":["2022ZX01A34"],"award-info":[{"award-number":["2022ZX01A34"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s00138-024-01645-w","type":"journal-article","created":{"date-parts":[[2024,12,21]],"date-time":"2024-12-21T03:44:46Z","timestamp":1734752686000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Graph-based relational reasoning network for video question answering"],"prefix":"10.1007","volume":"36","author":[{"given":"Tao","family":"Tan","sequence":"first","affiliation":[]},{"given":"Guanglu","family":"Sun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,21]]},"reference":[{"key":"1645_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.122399","volume":"239","author":"H 
Pei","year":"2024","unstructured":"Pei, H., Zhang, C., Zhang, X., Liu, X., Ma, Y.: Recognizing materials in cultural relic images using computer vision and attention mechanism. Expert Syst. Appl. 239, 122399 (2024). https:\/\/doi.org\/10.1016\/j.eswa.2023.122399","journal-title":"Expert Syst. Appl."},{"key":"1645_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.121542","volume":"237","author":"S Razaa","year":"2024","unstructured":"Razaa, S., Garg, M., Reji, D.J., Bashir, S.R., Ding, C.: Nbias: a natural language processing framework for bias identification in text. Expert Syst. Appl. 237, 121542 (2024). https:\/\/doi.org\/10.1016\/j.eswa.2023.121542","journal-title":"Expert Syst. Appl."},{"key":"1645_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.107832","volume":"131","author":"X Song","year":"2024","unstructured":"Song, X., Wu, C., Song, S., Stojanovic, V., Tejado, I.: Fuzzy wavelet neural adaptive finite-time self-triggered fault-tolerant control for a quadrotor unmanned aerial vehicle with scheduled performance. Eng. Appl. Artif. Intell. 131, 107832 (2024). https:\/\/doi.org\/10.1016\/j.engappai.2023.107832","journal-title":"Eng. Appl. Artif. Intell."},{"key":"1645_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.cnsns.2024.107945","volume":"132","author":"X Song","year":"2024","unstructured":"Song, X., Peng, Z., Song, S., Stojanovic, V.: Anti-disturbance state estimation for PDT-switched RDNNs utilizing time-sampling and space-splitting measurements. Commun. Nonlinear Sci. Numer. Simul. 132, 107945 (2024). https:\/\/doi.org\/10.1016\/j.cnsns.2024.107945","journal-title":"Commun. Nonlinear Sci. Numer. Simul."},{"key":"1645_CR5","doi-asserted-by":"publisher","first-page":"5060","DOI":"10.1109\/TIP.2023.3310332","volume":"32","author":"S Zhou","year":"2023","unstructured":"Zhou, S., Guo, D., Li, J., Yang, X., Wang, M.: Exploring sparse spatial relation in graph inference for text-based VQA. IEEE Trans. Image Process. 
32, 5060\u20135074 (2023). https:\/\/doi.org\/10.1109\/TIP.2023.3310332","journal-title":"IEEE Trans. Image Process."},{"key":"1645_CR6","doi-asserted-by":"crossref","unstructured":"Li, L., Chen, H., Gao, C., Yang, X.: How to configure good in-context sequence for visual question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2024)","DOI":"10.1109\/CVPR52733.2024.02522"},{"key":"1645_CR7","doi-asserted-by":"crossref","unstructured":"Song, X., Shi, Y., Chen, X., Han, Y.: Explore multi-step reasoning in video question answering. In: Proceedings of the 26th ACM International Conference on Multimedia (2018)","DOI":"10.1145\/3240508.3240563"},{"key":"1645_CR8","doi-asserted-by":"crossref","unstructured":"Fan, C., Zhang, X., Zhang, S., Wang, W., Zhang, C., Huang, H.: Heterogeneous memory enhanced multimodal attention model for video question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00210"},{"key":"1645_CR9","doi-asserted-by":"crossref","unstructured":"Gao, L., Zeng, P., Song, J., Li, Y.-F., Liu, W., Mei, T., Shen, H.: Structured two-stream attention network for video question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence (2019)","DOI":"10.1609\/aaai.v33i01.33016391"},{"key":"1645_CR10","doi-asserted-by":"crossref","unstructured":"Li, X., Song, J., Gao, L., Liu, X., Huang, W., He, X., Gan, C.: Beyond RNNs: Positional self-attention with co-attention for video question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence (2019)","DOI":"10.1609\/aaai.v33i01.33018658"},{"key":"1645_CR11","doi-asserted-by":"crossref","unstructured":"Liu, F., Liu, J., Wang, W., Hanqing, L.: Hair: Hierarchical visual-semantic relational reasoning for video question answering. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00172"},{"key":"1645_CR12","doi-asserted-by":"crossref","unstructured":"Li, F., Bai, T., Cao, C., Liu, Z., Yan, C., Wu, B.: Relation-aware hierarchical attention framework for video question answering. In: Proceedings of the 2021 International Conference on Multimedia Retrieval (2021)","DOI":"10.1145\/3460426.3463635"},{"key":"1645_CR13","doi-asserted-by":"crossref","unstructured":"Huang, D., Chen, P., Zeng, R., Du, Q., Tan, M., Gan, C.: Location-aware graph convolutional networks for video question answering (2020)","DOI":"10.1609\/aaai.v34i07.6737"},{"key":"1645_CR14","doi-asserted-by":"crossref","unstructured":"Seo, A., Kang, G.-C., Park, J., Zhang, B.-T.: Attend what you need: motion-appearance synergistic networks for video question answering. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (2021)","DOI":"10.18653\/v1\/2021.acl-long.481"},{"issue":"4","key":"1645_CR15","doi-asserted-by":"publisher","first-page":"103375","DOI":"10.1016\/j.ipm.2023.103375","volume":"60","author":"Z Wang","year":"2023","unstructured":"Wang, Z., Li, F., Ota, k, Mianxiong, D., Wu, B.: Regr: Relation-aware graph reasoning framework for video question answering. Inf. Process. Manage. 60(4), 103375 (2023). https:\/\/doi.org\/10.1016\/j.ipm.2023.103375","journal-title":"Inf. Process. Manage."},{"key":"1645_CR16","doi-asserted-by":"crossref","unstructured":"Qi, J., Xu, Y., Wu, B.: Bottom-up hierarchical propagation networks with heterogeneous graph modeling for video question answering. 
In: International Joint Conference on Neural Networks (2024)","DOI":"10.1109\/IJCNN60899.2024.10650620"},{"key":"1645_CR17","doi-asserted-by":"publisher","first-page":"1109","DOI":"10.1109\/TIP.2024.3358726","volume":"33","author":"Z Bai","year":"2024","unstructured":"Bai, Z., Wang, R., Gao, D., Chen, X.: Event graph guided compositional spatial-temporal reasoning for video question answering. IEEE Trans. Image Process. 33, 1109\u20131121 (2024). https:\/\/doi.org\/10.1109\/TIP.2024.3358726","journal-title":"IEEE Trans. Image Process."},{"key":"1645_CR18","doi-asserted-by":"crossref","unstructured":"Peng, L., Yang, S., Bin, Y., Wang: Progressive graph attention network for video question answering. In: Proceedings of the 29th ACM International Conference on Multimedia (2021)","DOI":"10.1145\/3474085.3475193"},{"issue":"4","key":"1645_CR19","doi-asserted-by":"publisher","first-page":"5477","DOI":"10.1109\/TIP.2021.3076556","volume":"30","author":"W Jin","year":"2021","unstructured":"Jin, W., Zhao, Z., Cao, X., Zhu, J., He, X., Zhuang, Y.: Adaptive spatio-temporal graph enhanced vision-language representation for video qa. IEEE Trans. Image Process. 30(4), 5477\u20135489 (2021). https:\/\/doi.org\/10.1109\/TIP.2021.3076556","journal-title":"IEEE Trans. Image Process."},{"key":"1645_CR20","doi-asserted-by":"crossref","unstructured":"Dang, L.H., Le, T.M., Le, V., Tran, T.: Hierarchical object-oriented spatio-temporal reasoning for video question answering. In: Proceedings of the 30th International Joint Conference on Artificial Intelligence (IJCAI) (2021)","DOI":"10.24963\/ijcai.2021\/88"},{"key":"1645_CR21","doi-asserted-by":"publisher","first-page":"2758","DOI":"10.1109\/TIP.2021.3051756","volume":"30","author":"M Gu","year":"2021","unstructured":"Gu, M., Zhao, Z., Jin, W., Hong, R., Wu, F.: Graph-based multi-interaction network for video question answering. IEEE Trans. Image Process. 30, 2758\u20132770 (2021). 
https:\/\/doi.org\/10.1109\/TIP.2021.3051756","journal-title":"IEEE Trans. Image Process."},{"key":"1645_CR22","unstructured":"Chowdhury, M.I.H., Nguyen, K., Sridharan, S., Fookes, C.: Hierarchical relational attention for video question answering. In 2018 25th IEEE International Conference on Image Processing (ICIP) (2018)"},{"key":"1645_CR23","doi-asserted-by":"crossref","unstructured":"Jang, Y., Song, Y., Yu, Y., Kim, Y., Kim, G.: TGIF-QA: Toward spatio-temporal reasoning in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2017)","DOI":"10.1109\/CVPR.2017.149"},{"key":"1645_CR24","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Jiang, X., Cai, D., Xiao, J., He, X., Pu, S.: Multi-turn video question answering via multi-stream hierarchical attention context network. IJCAI (2018)","DOI":"10.24963\/ijcai.2018\/513"},{"key":"1645_CR25","doi-asserted-by":"crossref","unstructured":"Gao, J., Ge, R., Chen, K., Nevatia, R.: Motion-appearance co-memory networks for video question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2018)","DOI":"10.1109\/CVPR.2018.00688"},{"issue":"3","key":"1645_CR26","doi-asserted-by":"publisher","first-page":"1367","DOI":"10.1109\/TNNLS.2021.3105280","volume":"34","author":"F Liu","year":"2021","unstructured":"Liu, F., Liu, J., Hong, R., Lu, H.: Question-guided erasing-based spatiotemporal attention learning for video question answering. IEEE Trans Neural Netw Learn Syst 34(3), 1367\u20131379 (2021). https:\/\/doi.org\/10.1109\/TNNLS.2021.3105280","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"1645_CR27","doi-asserted-by":"crossref","unstructured":"Li, D., Li, J., Li, H., Niebles, J.C., Hoi, S.C.: Align and prompt: Video-and-language pre-training with entity prompts. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2022)","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"1645_CR28","unstructured":"Yi, K., Wu, J., Gan, C., Torralba, A., Kohli, P., Tenenbaum, J.: Neural-symbolic VQA: disentangling reasoning from vision and language understanding. Advances in neural information processing systems (2018)"},{"key":"1645_CR29","doi-asserted-by":"crossref","unstructured":"Jin, W., Zhao, Z., Gu, M., Yu, J., Xiao, J., Zhuang, y.: Multi-interaction network with object relation for video question answering. In: Proceedings of the 27th ACM International Conference on Multimedia (2019)","DOI":"10.1145\/3343031.3351065"},{"issue":"6","key":"1645_CR30","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-021-1248-1","volume":"16","author":"R Liu","year":"2022","unstructured":"Liu, R., Han, Y.: Instance-sequence reasoning for video question answering. Front. Comput. Sci. 16(6), 166708 (2022). https:\/\/doi.org\/10.1007\/s11704-021-1248-1","journal-title":"Front. Comput. Sci."},{"key":"1645_CR31","doi-asserted-by":"crossref","unstructured":"Min, J., Buch, S., Nagrani, A., Cho, M., Schmid, C.: MoReVQA: exploring modular reasoning models for video question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. (2024)","DOI":"10.1109\/CVPR52733.2024.01257"},{"key":"1645_CR32","doi-asserted-by":"crossref","unstructured":"Jiang, P., Han, Y.: Reasoning with heterogeneous graph alignment for video question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence (2020)","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"1645_CR33","doi-asserted-by":"publisher","first-page":"3369","DOI":"10.1109\/TMM.2021.3097171","volume":"24","author":"J Wang","year":"2021","unstructured":"Wang, J., Bao, B.-K., Xu, C.: Dualvgr: A dual-visual graph reasoning unit for video question answering. IEEE Trans. Multimedia 24, 3369\u20133380 (2021). 
https:\/\/doi.org\/10.1109\/TMM.2021.3097171","journal-title":"IEEE Trans. Multimedia"},{"key":"1645_CR34","doi-asserted-by":"publisher","first-page":"5002","DOI":"10.1109\/TMM.2022.3185900","volume":"25","author":"J Jiang","year":"2022","unstructured":"Jiang, J., Liu, Z., Zheng, N.: Livlr: A lightweight visual-linguistic reasoning framework for video question answering. IEEE Trans. Multimedia 25, 5002\u20135013 (2022). https:\/\/doi.org\/10.1109\/TMM.2022.3185900","journal-title":"IEEE Trans. Multimedia"},{"key":"1645_CR35","doi-asserted-by":"crossref","unstructured":"Cherian, A., Hori, C., Marks, T.K., Le\u00a0Roux, J.: (2.5+ 1) D spatio-temporal scene graphs for video question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence (2022)","DOI":"10.1609\/aaai.v36i1.19922"},{"key":"1645_CR36","doi-asserted-by":"crossref","unstructured":"Xiao, J., Zhou, P., Chua, T.-S., Yan, S.: Video graph transformer for video question answering. In: European Conference on Computer Vision (2022)","DOI":"10.1007\/978-3-031-20059-5_3"},{"key":"1645_CR37","doi-asserted-by":"crossref","unstructured":"Mao, J., Jiang, W., Wang, X., Feng, Z., Lyu, Y., Liu, H., Zhu, Y.: Dynamic multistep reasoning based on video scene graph for video question answering. In: Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (2022)","DOI":"10.18653\/v1\/2022.naacl-main.286"},{"key":"1645_CR38","doi-asserted-by":"publisher","first-page":"1684","DOI":"10.1109\/TIP.2022.3142526","volume":"31","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Zhang, X., Huang, F., Zhang, B., Li, Z.: Cross-attentional spatio-temporal semantic graph networks for video question answering. IEEE Trans. Image Process. 31, 1684\u20131696 (2022). https:\/\/doi.org\/10.1109\/TIP.2022.3142526","journal-title":"IEEE Trans. 
Image Process."},{"key":"1645_CR39","doi-asserted-by":"crossref","unstructured":"Xu, D., Zhao, Z., Xiao, J., Wu, F., Zhang, H., He, X., Zhuang, Y.: Video question answering via gradually refined attention over appearance and motion. In: Proceedings of the 25th ACM International Conference on Multimedia (2017)","DOI":"10.1145\/3123266.3123427"},{"key":"1645_CR40","doi-asserted-by":"crossref","unstructured":"Le, T.M., Le, V., Venkatesh, S., Tran, T.: Hierarchical conditional relation networks for video question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2020)","DOI":"10.1109\/CVPR42600.2020.00999"},{"key":"1645_CR41","doi-asserted-by":"publisher","first-page":"993","DOI":"10.1007\/s11063-019-10003-1","volume":"52","author":"S Xiao","year":"2020","unstructured":"Xiao, S., Li, Y., Ye, Y., Chen, L., Pu, S., Zhao, Z., Shao, J., Xiao, J.: Hierarchical temporal fusion of multi-grained attention features for video question answering. Neural Process. Lett. 52, 993\u20131003 (2020). https:\/\/doi.org\/10.1007\/s11063-019-10003-1","journal-title":"Neural Process. Lett."},{"key":"1645_CR42","unstructured":"Peng, M., Wang, C., Gao, Y., Shi, Y., Zhou, X.-D.: Temporal pyramid transformer with multimodal interaction for video question answering (2021) arXiv:2109.04735"},{"key":"1645_CR43","doi-asserted-by":"crossref","unstructured":"Gao, D., Zhou, L., Ji, L., Zhu, L., Yang, Y., Shou, M.Z.: Mist: multi-modal iterative spatial-temporal transformer for long-form video question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPR52729.2023.01419"},{"key":"1645_CR44","doi-asserted-by":"crossref","unstructured":"Li, X., Gao, L., Wang, X., Liu, W., Xu, X., Shen, H.T., Song, J.: Learnable aggregating net with diversity learning for video question answering. 
In: Proceedings of the 27th ACM International Conference on Multimedia (2019)","DOI":"10.1145\/3343031.3350971"},{"key":"1645_CR45","doi-asserted-by":"crossref","unstructured":"Kim, J., Ma, M., Kim, K., Kim, S., Yoo, C.D.: Progressive attention memory network for movie story question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00853"},{"key":"1645_CR46","doi-asserted-by":"crossref","unstructured":"Le, H., Sahoo, D., Chen, N.F., Hoi, S.C.: BiST: Bi-directional spatio-temporal reasoning for video-grounded dialogues. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.145"},{"key":"1645_CR47","doi-asserted-by":"crossref","unstructured":"Jiang, J., Chen, Z., Lin, H., Zhao, X., Gao, Y.: Divide and conquer: question-guided spatio-temporal contextual attention for video question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence (2020)","DOI":"10.1609\/aaai.v34i07.6766"},{"issue":"3","key":"1645_CR48","doi-asserted-by":"publisher","first-page":"1454","DOI":"10.1109\/TCSVT.2022.3212463","volume":"33","author":"k Zhang","year":"2023","unstructured":"Zhang, k, Wang, R., Zhou, F., Luo, Y.: ERM: energy-based refined-attention mechanism for video question answering. IEEE Trans. Circuits Syst. Video Technol. 33(3), 1454\u20131467 (2023). https:\/\/doi.org\/10.1109\/TCSVT.2022.3212463","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"1645_CR49","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1645_CR50","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: Towards real-time object detection with region proposal networks. 
Advances in Neural Information Processing Systems (2015)"},{"key":"1645_CR51","doi-asserted-by":"crossref","unstructured":"Pennington, J., Socher, R., Manning, C.D.: Glove: global vectors for word representation. In: Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (2014)","DOI":"10.3115\/v1\/D14-1162"},{"key":"1645_CR52","unstructured":"Chen, D., Dolan, W.B.: Collecting highly parallel data for paraphrase evaluation. In: Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies (2011)"},{"key":"1645_CR53","doi-asserted-by":"crossref","unstructured":"Xu, J., Mei, T., Yao, T., Rui, Y.: MSR-VTT: A large video description dataset for bridging video and language. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2016.571"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01645-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-024-01645-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01645-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,18]],"date-time":"2025-01-18T10:28:42Z","timestamp":1737196122000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-024-01645-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,21]]},"references-count":53,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["1645"],"URL":"https:\/\/doi.org\/10.1007\/s00138-024-01645-w","relation":{},"I
SSN":["0932-8092","1432-1769"],"issn-type":[{"type":"print","value":"0932-8092"},{"type":"electronic","value":"1432-1769"}],"subject":[],"published":{"date-parts":[[2024,12,21]]},"assertion":[{"value":"21 October 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 December 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 December 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 December 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"24"}}