{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T17:20:50Z","timestamp":1765041650942,"version":"3.44.0"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,7,7]],"date-time":"2025-07-07T00:00:00Z","timestamp":1751846400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,7]],"date-time":"2025-07-07T00:00:00Z","timestamp":1751846400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. 62166050","No. 62166050","No. 62166050","No. 62166050","No. 62166050","No. 62166050"],"award-info":[{"award-number":["No. 62166050","No. 62166050","No. 62166050","No. 62166050","No. 62166050","No. 62166050"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Major Science and Technology Project of Yunnan Province","award":["202402AD080002","202402AD080002","202402AD080002","202402AD080002","202402AD080002","202402AD080002"],"award-info":[{"award-number":["202402AD080002","202402AD080002","202402AD080002","202402AD080002","202402AD080002","202402AD080002"]}]},{"name":"Key Program of Fundamental Research Project of Yunnan Science and Technology Plan, China","award":["No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 202201AS070021"],"award-info":[{"award-number":["No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 202201AS070021","No. 
202201AS070021"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s00530-025-01881-8","type":"journal-article","created":{"date-parts":[[2025,7,7]],"date-time":"2025-07-07T09:01:03Z","timestamp":1751878863000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Text semantic-guided adaptive feature aggregation for image-text retrieval"],"prefix":"10.1007","volume":"31","author":[{"given":"Yajie","family":"Gu","sequence":"first","affiliation":[]},{"given":"Mingjie","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jianhou","family":"Gan","sequence":"additional","affiliation":[]},{"given":"Yiming","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jiatian","family":"Mei","sequence":"additional","affiliation":[]},{"given":"Chuanzhi","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,7]]},"reference":[{"key":"1881_CR1","doi-asserted-by":"crossref","unstructured":"You, Q., Jin, H., Wang, Z., Fang, C., Luo, J.: Image captioning with semantic attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4651\u20134659 (2016)","DOI":"10.1109\/CVPR.2016.503"},{"key":"1881_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., Agrawal, A., Lu, J., Mitchell, M., Batra, D., Zitnick, C.L., Parikh, D.: Vqa: Visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"1881_CR3","doi-asserted-by":"crossref","unstructured":"Yan, F., Mikolajczyk, K.: Deep correlation for matching images and text. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
3441\u20133450 (2015)","DOI":"10.1109\/CVPR.2015.7298966"},{"key":"1881_CR4","unstructured":"Faghri, F., Fleet, D.J., Kiros, J.R., Fidler, S.: Vse++: Improving visual-semantic embeddings with hard negatives. In: British Machine Vision Conference (BMVC) (2018)"},{"issue":"2","key":"1881_CR5","doi-asserted-by":"publisher","first-page":"394","DOI":"10.1109\/TPAMI.2018.2797921","volume":"41","author":"L Wang","year":"2018","unstructured":"Wang, L., Li, Y., Huang, J., Lazebnik, S.: Learning two-branch neural networks for image-text matching tasks. IEEE Trans. Pattern Anal. Mach. Intell. 41(2), 394\u2013407 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1881_CR6","doi-asserted-by":"crossref","unstructured":"Sarafianos, N., Xu, X., Kakadiaris, I.A.: Adversarial representation learning for text-to-image matching. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5814\u20135824 (2019)","DOI":"10.1109\/ICCV.2019.00591"},{"issue":"2","key":"1881_CR7","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3383184","volume":"16","author":"Z Zheng","year":"2020","unstructured":"Zheng, Z., Zheng, L., Garrett, M., Yang, Y., Xu, M., Shen, Y.-D.: Dual-path convolutional image-text embeddings with instance loss. ACM Trans. Multimed. Comput. Commun. Appl. 16(2), 1\u201323 (2020)","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"1881_CR8","doi-asserted-by":"crossref","unstructured":"Chen, J., Hu, H., Wu, H., Jiang, Y., Wang, C.: Learning the best pooling strategy for visual semantic embedding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15789\u201315798 (2021)","DOI":"10.1109\/CVPR46437.2021.01553"},{"key":"1881_CR9","doi-asserted-by":"crossref","unstructured":"Pham, K., Huynh, C., Lim, S.-N., Shrivastava, A.: Composing object relations and attributes for image-text matching. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14354\u201314363 (2024)","DOI":"10.1109\/CVPR52733.2024.01361"},{"key":"1881_CR10","doi-asserted-by":"crossref","unstructured":"Wang, Z., Yin, Y., Ramakrishnan, I.: Enhancing image-text matching with adaptive feature aggregation. In: ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 8245\u20138249 (2024). IEEE","DOI":"10.1109\/ICASSP48485.2024.10446913"},{"key":"1881_CR11","doi-asserted-by":"crossref","unstructured":"Lee, K.-H., Chen, X., Hua, G., Hu, H., He, X.: Stacked cross attention for image-text matching. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 201\u2013216 (2018)","DOI":"10.1007\/978-3-030-01225-0_13"},{"key":"1881_CR12","doi-asserted-by":"crossref","unstructured":"Zhang, K., Mao, Z., Wang, Q., Zhang, Y.: Negative-aware attention framework for image-text matching. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15661\u201315670 (2022)","DOI":"10.1109\/CVPR52688.2022.01521"},{"issue":"4","key":"1881_CR13","doi-asserted-by":"publisher","first-page":"1981","DOI":"10.1007\/s00530-023-01079-w","volume":"29","author":"L Yang","year":"2023","unstructured":"Yang, L., Feng, Y., Zhou, M., Xiong, X., Wang, Y., Qiang, B.: Multi-level network based on transformer encoder for fine-grained image-text matching. Multimed. Syst. 29(4), 1981\u20131994 (2023)","journal-title":"Multimed. Syst."},{"key":"1881_CR14","doi-asserted-by":"publisher","first-page":"2322","DOI":"10.1109\/TIP.2023.3266887","volume":"32","author":"H Diao","year":"2023","unstructured":"Diao, H., Zhang, Y., Liu, W., Ruan, X., Lu, H.: Plug-and-play regulators for image-text matching. IEEE Trans. Image Process. 32, 2322\u20132334 (2023)","journal-title":"IEEE Trans. 
Image Process."},{"key":"1881_CR15","doi-asserted-by":"crossref","unstructured":"Fu, Z., Mao, Z., Song, Y., Zhang, Y.: Learning semantic relationship among instances for image-text matching. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15159\u201315168 (2023)","DOI":"10.1109\/CVPR52729.2023.01455"},{"key":"1881_CR16","doi-asserted-by":"crossref","unstructured":"Diao, H., Zhang, Y., Ma, L., Lu, H.: Similarity reasoning and filtration for image-text matching. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 1218\u20131226 (2021)","DOI":"10.1609\/aaai.v35i2.16209"},{"key":"1881_CR17","doi-asserted-by":"crossref","unstructured":"Kim, D., Kim, N., Kwak, S.: Improving cross-modal retrieval with set of diverse embeddings. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23422\u201323431 (2023)","DOI":"10.1109\/CVPR52729.2023.02243"},{"issue":"4","key":"1881_CR18","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1007\/s00530-024-01383-z","volume":"30","author":"W Cheng","year":"2024","unstructured":"Cheng, W., Han, Z., He, D., Wu, L.: Multi-view and region reasoning semantic enhancement for image-text retrieval. Multimed. Syst. 30(4), 179 (2024)","journal-title":"Multimed. Syst."},{"issue":"3","key":"1881_CR19","doi-asserted-by":"publisher","first-page":"1057","DOI":"10.1007\/s00530-022-01038-x","volume":"29","author":"H Sun","year":"2023","unstructured":"Sun, H., Qin, X., Liu, X.: Image-text matching using multi-subspace joint representation. Multimed. Syst. 29(3), 1057\u20131071 (2023)","journal-title":"Multimed. Syst."},{"key":"1881_CR20","doi-asserted-by":"crossref","unstructured":"Bin, Y., Li, H., Xu, Y., Xu, X., Yang, Y., Shen, H.T.: Unifying two-stream encoders with transformers for cross-modal retrieval. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 
3041\u20133050 (2023)","DOI":"10.1145\/3581783.3612427"},{"key":"1881_CR21","doi-asserted-by":"crossref","unstructured":"Li, H., Bin, Y., Liao, J., Yang, Y., Shen, H.T.: Your negative may not be true negative: Boosting image-text matching with false negative elimination. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 924\u2013934 (2023)","DOI":"10.1145\/3581783.3612101"},{"key":"1881_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Y., Liu, M., Huang, S., Lv, J.: Asymmetric visual semantic embedding framework for efficient vision-language alignment. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, pp. 5676\u20135684 (2025)","DOI":"10.1609\/aaai.v39i6.32605"},{"key":"1881_CR23","unstructured":"Kiros, R., Salakhutdinov, R., Zemel, R.S.: Unifying visual-semantic embeddings with multimodal neural language models. Computer Science (2014)"},{"key":"1881_CR24","first-page":"7","volume":"2","author":"Z Li","year":"2022","unstructured":"Li, Z., Guo, C., Feng, Z., Hwang, J.-N., Xue, X.: Multi-view visual semantic embedding. IJCAI 2, 7 (2022)","journal-title":"IJCAI"},{"key":"1881_CR25","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111550","volume":"291","author":"Y Ding","year":"2024","unstructured":"Ding, Y., Yu, J., Lv, Q., Zhao, H., Dong, J., Li, Y.: Multiview adaptive attention pooling for image-text retrieval. Knowl.-Based Syst. 291, 111550 (2024)","journal-title":"Knowl.-Based Syst."},{"issue":"2","key":"1881_CR26","doi-asserted-by":"publisher","first-page":"569","DOI":"10.1007\/s00530-022-00962-2","volume":"29","author":"C Zhang","year":"2023","unstructured":"Zhang, C., Yang, Y., Guo, J., Jin, G., Song, D., Liu, A.A.: Improving text-image cross-modal retrieval with contrastive loss. Multimed. Syst. 29(2), 569\u2013575 (2023)","journal-title":"Multimed. 
Syst."},{"issue":"12","key":"1881_CR27","doi-asserted-by":"publisher","first-page":"8825","DOI":"10.1007\/s00371-024-03274-w","volume":"40","author":"F Wu","year":"2024","unstructured":"Wu, F., Wang, Q., Wang, Z., Yu, S., Li, Y., Zhang, B., Lim, E.G.: Itcontrast: contrastive learning with hard negative synthesis for image-text matching. Vis. Comput. 40(12), 8825\u20138838 (2024)","journal-title":"Vis. Comput."},{"key":"1881_CR28","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3128\u20133137 (2015)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"1881_CR29","doi-asserted-by":"crossref","unstructured":"Chen, H., Ding, G., Liu, X., Lin, Z., Liu, J., Han, J.: Imram: Iterative matching with recurrent attention memory for cross-modal image-text retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12655\u201312663 (2020)","DOI":"10.1109\/CVPR42600.2020.01267"},{"issue":"1","key":"1881_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2023.103575","volume":"61","author":"X Qin","year":"2024","unstructured":"Qin, X., Li, L., Hao, F., Ge, M., Pang, G.: Multi-level knowledge-driven feature representation and triplet loss optimization network for image-text retrieval. Inform. Process. Manage. 61(1), 103575 (2024)","journal-title":"Inform. Process. Manage."},{"issue":"2","key":"1881_CR31","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2024.103968","volume":"62","author":"A-A Liu","year":"2025","unstructured":"Liu, A.-A., Yang, L., Li, W., Nie, W., Liu, X., Chen, H.: Multi-level semantics probability embedding for image-text matching. Inform. Process. Manage. 62(2), 103968 (2025)","journal-title":"Inform. Process. 
Manage."},{"key":"1881_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2024.108005","volume":"133","author":"T Yao","year":"2024","unstructured":"Yao, T., Peng, S., Sun, Y., Sheng, G., Fu, H., Kong, X.: Cross-modal semantic interference suppression for image-text matching. Eng. Appl. Artif. Intell. 133, 108005 (2024)","journal-title":"Eng. Appl. Artif. Intell."},{"key":"1881_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, X., Li, H., Ye, M.: Negative pre-aware for noisy cross-modal matching. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, pp. 7341\u20137349 (2024)","DOI":"10.1609\/aaai.v38i7.28564"},{"key":"1881_CR34","doi-asserted-by":"crossref","unstructured":"Pan, Z., Wu, F., Zhang, B.: Fine-grained image-text matching by cross-modal hard aligning network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19275\u201319284 (2023)","DOI":"10.1109\/CVPR52729.2023.01847"},{"key":"1881_CR35","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (long and Short Papers), pp. 4171\u20134186 (2019)"},{"key":"1881_CR36","unstructured":"Vaswani, A.: Attention is all you need. Advances in Neural Information Processing Systems (2017)"},{"key":"1881_CR37","doi-asserted-by":"crossref","unstructured":"Tsai, Y.-H.H., Bai, S., Liang, P.P., Kolter, J.Z., Morency, L.-P., Salakhutdinov, R.: Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the Conference. Association for Computational Linguistics. Meeting, vol. 2019, p. 6558 (2019). 
NIH Public Access","DOI":"10.18653\/v1\/P19-1656"},{"key":"1881_CR38","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (ICLR) (2021)"},{"key":"1881_CR39","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1881_CR40","unstructured":"Liang, Y., Ge, C., Tong, Z., Song, Y., Xie, P., et al.: Not all patches are what you need: Expediting vision transformers via token reorganizations. In: International Conference on Learning Representations (ICLR) (2022)"},{"key":"1881_CR41","doi-asserted-by":"crossref","unstructured":"Xu, Y., Zhang, Z., Zhang, M., Sheng, K., Li, K., Dong, W., Zhang, L., Xu, C., Sun, X.: Evo-vit: Slow-fast token evolution for dynamic vision transformer. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 2964\u20132972 (2022)","DOI":"10.1609\/aaai.v36i3.20202"},{"issue":"9","key":"1881_CR42","doi-asserted-by":"publisher","first-page":"6437","DOI":"10.1109\/TCSVT.2022.3164230","volume":"32","author":"X Dong","year":"2022","unstructured":"Dong, X., Zhang, H., Zhu, L., Nie, L., Liu, L.: Hierarchical feature aggregation based on transformer for image-text matching. IEEE Trans. Circuits Syst. Video Technol. 32(9), 6437\u20136447 (2022)","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"issue":"4","key":"1881_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2024.103716","volume":"61","author":"X Ge","year":"2024","unstructured":"Ge, X., Xu, S., Chen, F., Wang, J., Wang, G., An, S., Jose, J.M.: 3shnet: Boosting image-sentence retrieval via visual semantic-spatial self-highlighting. Inform. Process. Manage. 61(4), 103716 (2024)","journal-title":"Inform. Process. Manage."},{"key":"1881_CR44","doi-asserted-by":"crossref","unstructured":"Plummer, B.A., Wang, L., Cervantes, C.M., Caicedo, J.C., Hockenmaier, J., Lazebnik, S.: Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2641\u20132649 (2015)","DOI":"10.1109\/ICCV.2015.303"},{"key":"1881_CR45","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: Computer Vision\u2013ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pp. 740\u2013755 (2014). Springer","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1881_CR46","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (ICLR) (2019)"},{"issue":"1","key":"1881_CR47","doi-asserted-by":"publisher","first-page":"641","DOI":"10.1109\/TPAMI.2022.3148470","volume":"45","author":"K Li","year":"2022","unstructured":"Li, K., Zhang, Y., Li, K., Li, Y., Fu, Y.: Image-text embedding learning via visual and textual semantic reasoning. IEEE Trans. Pattern Anal. Mach. Intell. 45(1), 641\u2013656 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"issue":"4","key":"1881_CR48","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3451390","volume":"17","author":"N Messina","year":"2021","unstructured":"Messina, N., Amato, G., Esuli, A., Falchi, F., Gennaro, C., Marchand-Maillet, S.: Fine-grained visual textual alignment for cross-modal retrieval using transformer encoders. ACM Trans. Multimed. Comput. Commun. Appl. 17(4), 1\u201323 (2021)","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"1881_CR49","doi-asserted-by":"publisher","first-page":"3622","DOI":"10.1109\/TIP.2023.3286710","volume":"32","author":"C Liu","year":"2023","unstructured":"Liu, C., Zhang, Y., Wang, H., Chen, W., Wang, F., Huang, Y., Shen, Y.-D., Wang, L.: Efficient token-guided image-text retrieval with consistent multimodal contrastive training. IEEE Trans. Image Process. 32, 3622\u20133633 (2023)","journal-title":"IEEE Trans. Image Process."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-01881-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-025-01881-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-01881-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,15]],"date-time":"2025-09-15T09:02:19Z","timestamp":1757926939000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-025-01881-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,7]]},"references-count":49,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["1881"],"URL":"https:\/\/doi.org\/10.1007\/s00530-025-0188
1-8","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"type":"print","value":"0942-4962"},{"type":"electronic","value":"1432-1882"}],"subject":[],"published":{"date-parts":[[2025,7,7]]},"assertion":[{"value":"22 October 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 May 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 July 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"300"}}