{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T13:54:34Z","timestamp":1772459674202,"version":"3.50.1"},"reference-count":55,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U24A201401"],"award-info":[{"award-number":["U24A201401"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s10994-025-06969-w","type":"journal-article","created":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T12:57:18Z","timestamp":1772456238000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["W2S: Weak-to-Strong Prompt Correction for Large Language 
Models"],"prefix":"10.1007","volume":"115","author":[{"given":"Lirong","family":"Gao","sequence":"first","affiliation":[]},{"given":"Xinyi","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Ru","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Qi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yiming","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Wentao","family":"Ye","sequence":"additional","affiliation":[]},{"given":"Haoze","family":"Li","sequence":"additional","affiliation":[]},{"given":"Haobo","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Junbo","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,2]]},"reference":[{"key":"6969_CR1","unstructured":"Anthropic. (2024). Claude 3.5 Sonnet. https:\/\/www.anthropic.com\/claude\/sonnet"},{"key":"6969_CR2","unstructured":"Banerjee, S., & Lavie, A. (2005). Meteor: An automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL workshop on intrinsic and extrinsic evaluation measures for machine translation and\/or summarization (pp. 65\u201372)"},{"key":"6969_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T. B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., Agarwal, S., Herbert-Voss, A., Krueger, G., Henighan, T., Child, R., Ramesh, A., Ziegler, D. M., Wu, J., Winter, C., Hesse, C., Chen, M., Sigler, E., Litwin, M., Gray, S., Chess, B., Clark, J., Berner, C., McCandlish, S., Radford, A., Sutskever, I., Amodei, D.(2020). Language models are few-shot learners. 
Advances in Neural Information Processing Systems,33, 1877\u20131901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6969_CR5","unstructured":"Burns, C., Izmailov, P., Kirchner, J.H., Baker, B., Gao, L., Aschenbrenner, L., Chen, Y., Ecoffet, A., Joglekar, M., Leike, J., Sutskever, I., & Wu, J. (2024). Weak-to-strong generalization: Eliciting strong capabilities with weak supervision. In: Salakhutdinov, R., Kolter, Z., Heller, K., Weller, A., Oliver, N., Scarlett, J., Berkenkamp, F. (eds.) Proceedings of the 41st international conference on machine learning. Proceedings of machine learning research (Vol. 235, pp. 4971\u20135012)."},{"key":"6969_CR6","unstructured":"Castro Ferreira, T., Gardent, C., Ilinykh, N., Lee, C., Mille, S., Moussallem, D., & Shimorina, A. (2020). In T. Castro Ferreira, C. Gardent, N. Ilinykh, C. Lee, S. Mille, D. Moussallem, & A. Shimorina (Eds.), Proceedings of the 3rd International Workshop on Natural Language Generation from the Semantic Web (WebNLG+). The 2020 bilingual, bi-directional WebNLG+ shared task: Overview and evaluation results (WebNLG+ 2020) (pp. 55\u201376). Association for Computational Linguistics, Dublin, Ireland (Virtual)."},{"key":"6969_CR7","unstructured":"Chen, L., Chen, J., Goldstein, T., Huang, H., & Zhou, T. (2024). Instructzero: Efficient instruction optimization for black-box large language models. In Forty-first international conference on machine learning"},{"key":"6969_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., Wen, Z., Fan, G., Chen, Z., Wu, W., Liu, D., Li, Z., Liu, B., & Xiao, Y. (2024). Mapo: Boosting large language model performance with model-adaptive prompt optimization. arXiv preprint arXiv:2407.04118","DOI":"10.18653\/v1\/2023.findings-emnlp.215"},{"key":"6969_CR9","doi-asserted-by":"crossref","unstructured":"Cheng, J., Liu, X., Zheng, K., Ke, P., Wang, H., Dong, Y., Tang, J., & Huang, M. (2023). 
Black-box prompt optimization: Aligning large language models without model training. arXiv preprint arXiv:2311.04155","DOI":"10.18653\/v1\/2024.acl-long.176"},{"key":"6969_CR10","doi-asserted-by":"crossref","unstructured":"Deng, M., Wang, J., Hsieh, C.-P., Wang, Y., Guo, H., Shu, T., Song, M., Xing, E., & Hu, Z. (2022). RLPrompt: Optimizing discrete text prompts with reinforcement learning. In Proceedings of the 2022 conference on empirical methods in natural language processing (pp. 3369\u20133391)","DOI":"10.18653\/v1\/2022.emnlp-main.222"},{"key":"6969_CR11","doi-asserted-by":"crossref","unstructured":"Du\u0161ek, O., Howcroft, D. M., & Rieser, V. (2019). Semantic noise matters for neural natural language generation. In Proceedings of the 12th international conference on natural language generation (pp. 421\u2013426)","DOI":"10.18653\/v1\/W19-8652"},{"key":"6969_CR12","doi-asserted-by":"crossref","unstructured":"Gao, L., Peng, R., Zhang, Y., & Zhao, J. (2024). DORY: Deliberative prompt recovery for LLM. In L.-W. Ku, A. Martins, & V. Srikumar (Eds.), Findings of the association for computational linguistics ACL 2024 (pp. 10614\u201310632). Association for Computational Linguistics, Bangkok, Thailand and virtual meeting.","DOI":"10.18653\/v1\/2024.findings-acl.631"},{"key":"6969_CR14","unstructured":"Goyal, T., Li, J. J., & Durrett, G. (2022). News summarization and evaluation in the era of gpt-3. arXiv preprint arXiv:2209.12356"},{"key":"6969_CR15","unstructured":"Guo, Q., Wang, R., Guo, J., Li, B., Song, K., Tan, X., Liu, G., Bian, J., & Yang, Y. (2024). Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In The twelfth international conference on learning representations"},{"key":"6969_CR16","first-page":"86309","volume":"37","author":"W Hu","year":"2024","unstructured":"Hu, W., Shu, Y., Yu, Z., Wu, Z., Lin, X., Dai, Z., Ng, S.-K., & Low, B. K. H. (2024). Localized zeroth-order prompt optimization. 
Advances in Neural Information Processing Systems,37, 86309\u201386345.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6969_CR17","unstructured":"Ji, J., Chen, B., Lou, H., Hong, D., Zhang, B., Pan, X., Dai, J., & Yang, Y. (2024). Aligner: Achieving efficient alignment through weak-to-strong correction. arXiv preprint arXiv:2402.02416"},{"key":"6969_CR18","doi-asserted-by":"crossref","unstructured":"Ke, P., Ji, H., Ran, Y., Cui, X., Wang, L., Song, L., Zhu, X., & Huang, M. (2021). JointGT: Graph-text joint representation learning for text generation from knowledge graphs. In: Zong, C., Xia, F., Li, W., Navigli, R. (eds.) Findings of the association for computational Linguistics: ACL-IJCNLP 2021 (pp. 2526\u20132538). Association for Computational Linguistics.","DOI":"10.18653\/v1\/2021.findings-acl.223"},{"key":"6969_CR19","doi-asserted-by":"crossref","unstructured":"Kim, H., Yi, X., Yao, J., Lian, J., Huang, M., Duan, S., Bak, J., & Xie, X. (2024). The road to artificial superintelligence: A comprehensive survey of superalignment. arXiv preprint arXiv:2412.16468","DOI":"10.70777\/si.v2i1.13963"},{"key":"6969_CR20","doi-asserted-by":"crossref","unstructured":"Kong, W., Hombaiah, S., Zhang, M., Mei, Q., & Bendersky, M. (2024). Prewrite: Prompt rewriting with reinforcement learning. In Proceedings of the 62nd annual meeting of the association for computational linguistics (Volume 2: Short Papers) (pp. 594\u2013601)","DOI":"10.18653\/v1\/2024.acl-short.54"},{"key":"6969_CR21","doi-asserted-by":"publisher","first-page":"46837","DOI":"10.52202\/079017-1486","volume":"37","author":"H Lang","year":"2024","unstructured":"Lang, H., Sontag, D., & Vijayaraghavan, A. (2024). Theoretical analysis of weak-to-strong generalization. 
Advances in Neural Information Processing Systems,37, 46837\u201346880.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6969_CR22","unstructured":"Li, Z., Peng, B., He, P., Galley, M., Gao, J., & Yan, X. (2023). Guiding large language models via directional stimulus prompting. Advances in Neural Information Processing Systems, 36"},{"key":"6969_CR23","unstructured":"Li, X., Yu, P., Zhou, C., Schick, T., Levy, O., Zettlemoyer, L., Weston, J. E., & Lewis, M. (2024). Self-alignment with instruction backtranslation. In The twelfth international conference on learning representations"},{"key":"6969_CR24","doi-asserted-by":"crossref","unstructured":"Li, M., Zhang, Y., He, S., Li, Z., Zhao, H., Wang, J., Cheng, N., & Zhou, T. (2024). Superfiltering: Weak-to-strong data filtering for fast instruction-tuning. In Proceedings of the 62nd annual meeting of the association for computational linguistics (Volume 1: Long Papers) (pp. 14255\u201314273).","DOI":"10.18653\/v1\/2024.acl-long.769"},{"key":"6969_CR25","unstructured":"Liang, J., Jiang, T., Wang, Y., Zhu, R., Ma, F., & Wang, T. (2025). Autoran: Weak-to-strong jailbreaking of large reasoning models. arXiv preprint arXiv:2505.10846"},{"key":"6969_CR26","unstructured":"Lin, C.-Y. (2004). ROUGE: A package for automatic evaluation of summaries. In Text summarization branches out (pp. 74\u201381). Association for Computational Linguistics."},{"key":"6969_CR27","unstructured":"Lin, X., Dai, Z., Verma, A., Ng, S.-K., Jaillet, P., Low, B. K. H. (2024). Prompt optimization with human feedback. In ICML 2024 workshop on models of human feedback for AI alignment"},{"key":"6969_CR28","unstructured":"Liu, C., Chao, Q., Zhang, W., Wu, X., Li, B., Luu, A. T., & Bing, L. (2024). Zero-to-strong generalization: Eliciting strong capabilities of large language models iteratively without gold labels. arXiv preprint arXiv:2409.12425"},{"key":"6969_CR29","unstructured":"Meta. (2024). The llama 3 herd of models. 
arXiv preprint arXiv:2407.21783"},{"key":"6969_CR30","doi-asserted-by":"crossref","unstructured":"Nallapati, R., Zhou, B., Santos, C., G\u00fcl\u00e7ehre, \u00c7., & Xiang, B. (2016). Abstractive text summarization using sequence-to-sequence rnns and beyond. In Proceedings of the 20th SIGNLL conference on computational natural language learning (pp. 280\u2013290)","DOI":"10.18653\/v1\/K16-1028"},{"key":"6969_CR31","unstructured":"OpenAI. (2023). Introducing Superalignment. https:\/\/openai.com\/blog\/introducing- superalignment"},{"key":"6969_CR32","unstructured":"OpenAI. (2024). Introducing OpenAI o1-preview . https:\/\/openai.com\/index\/introducing-openai-o1-preview"},{"key":"6969_CR33","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., & Zhu, W.-J. (2002). Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the association for computational linguistics (pp. 311\u2013318). Association for Computational Linguistics.","DOI":"10.3115\/1073083.1073135"},{"key":"6969_CR34","doi-asserted-by":"crossref","unstructured":"Prasad, A., Hase, P., Zhou, X., & Bansal, M. (2023). Grips: Gradient-free, edit-based instruction search for prompting large language models. In Proceedings of the 17th conference of the European chapter of the association for computational linguistics (pp. 3845\u20133864)","DOI":"10.18653\/v1\/2023.eacl-main.277"},{"key":"6969_CR35","doi-asserted-by":"crossref","unstructured":"Pryzant, R., Iter, D., Li, J., Lee, Y., Zhu, C., & Zeng, M. (2023). Automatic prompt optimization with \u201cgradient descent\u201d and beam search. In Proceedings of the 2023 conference on empirical methods in natural language processing (pp. 7957\u20137968)","DOI":"10.18653\/v1\/2023.emnlp-main.494"},{"key":"6969_CR36","unstructured":"Rafailov, R., Sharma, A., Mitchell, E., Manning, C.D., Ermon, S., & Finn, C. (2024). 
Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36"
Unleashing the potential of large language models as prompt optimizers: Analogical analysis with gradient-based model optimizers. In Proceedings of the AAAI conference on artificial intelligence (Vol. 39, pp. 25264\u201325272).","DOI":"10.1609\/aaai.v39i24.34713"},{"key":"6969_CR44","unstructured":"Wang, R., An, S., Cheng, M., Zhou, T., Hwang, S. J., & Hsieh, C.-J. (2024). One prompt is not enough: Automated construction of a mixture-of-expert prompts. arXiv preprint arXiv:2407.00256"},{"key":"6969_CR45","doi-asserted-by":"crossref","unstructured":"Wang, Y., Kordi, Y., Mishra, S., Liu, A., Smith, N. A., Khashabi, D., & Hajishirzi, H. (2023). Self-instruct: Aligning language models with self-generated instructions. In Proceedings of the 61st annual meeting of the association for computational linguistics (Volume 1: Long Papers) (pp. 13484\u201313508)","DOI":"10.18653\/v1\/2023.acl-long.754"},{"key":"6969_CR46","first-page":"24824","volume":"35","author":"J Wei","year":"2022","unstructured":"Wei, J., Wang, X., Schuurmans, D., Bosma, M., Ichter, B., Xia, F., Chi, E. H., Le, Q. V., Zhou, D. (2022). Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems,35, 24824\u201324837.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6969_CR47","doi-asserted-by":"crossref","unstructured":"Wu, Z., Wang, S., Gu, J., Hou, R., Dong, Y., Vydiswaran, V. G. V., & Ma, H. (2022). IDPG: An instance-dependent prompt generation method. In: Carpuat, M., Marneffe, M.-C., Meza Ruiz, I. V. (eds.) Proceedings of the 2022 conference of the North American chapter of the association for computational linguistics: Human language technologies (pp. 5507\u20135521). Association for Computational Linguistics, Seattle, United States (2022)","DOI":"10.18653\/v1\/2022.naacl-main.403"},{"key":"6969_CR48","unstructured":"Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., & Jiang, D. (2023). 
Wizardlm: Empowering large language models to follow complex instructions. arXiv preprint arXiv:2304.12244"},{"key":"6969_CR49","doi-asserted-by":"crossref","unstructured":"Yang, Y., Ma, Y., & Liu, P.(2024). Weak-to-strong reasoning. In Findings of the association for computational linguistics: EMNLP 2024 (pp. 8350\u20138367)","DOI":"10.18653\/v1\/2024.findings-emnlp.490"},{"key":"6969_CR50","doi-asserted-by":"publisher","unstructured":"Yang, A.,Yang, B.,Hui, B.,Zheng, B.,Yu, B.,Zhou, C.,Li, C.,Li, C.,Liu, D.,Huang, F.,Dong, G.,Wei, H.,Lin, H.,Tang, J.,Wang, J.,Yang, J.,Tu, J.,Zhang, J.,Ma, J.,Yang, J.,Xu, J.,Zhou, J.,Bai, J.,He, J.,Lin, J.,Dang, K.,Lu, K.,Chen, K.,Yang, K.,Li, M.,Xue, M.,Ni, N.,Zhang, P.,Wang, P.,Peng, R.,Men, R.,Gao, R.,Lin, R.,Wang, S.,Bai, S.,Tan, S.,Zhu, T.,Li, T.,Liu, T.,Ge, W.,Deng, X.,Zhou, X.,Ren, X.,Zhang, X.,Wei, X.,Ren, X.,Liu, X.,Fan, Y.,Yao, Y.,Zhang, Y.,Wan, Y.,Chu, Y.,Liu, Y.,Cui, Z.,Zhang, Z.,Guo, Z.,Fan, Z. (2024). Qwen2 technical report. arXiv preprint https:\/\/doi.org\/10.48550\/arXiv.2407.10671","DOI":"10.48550\/arXiv.2407.10671"},{"key":"6969_CR51","unstructured":"Yu, J., He, R., & Ying, Z. (2024). Thought propagation: An analogical approach to complex reasoning with large language models. In The twelfth international conference on learning representations"},{"key":"6969_CR13","doi-asserted-by":"publisher","unstructured":"Zeng, A., Xu, B., Wang, B., Zhang, C., Yin, D., Rojas, D., Feng, G., Zhao, H., Lai, H., Yu, H., Wang, H., Sun, J., Zhang, J., Cheng, J., Gui, J., Tang, J., Zhang, J., Li, J., Zhao, L., Wu, L., Zhong, L., Liu, M., Huang, M., Zhang, P., Zheng, Q., Lu, R., Duan, S., Zhang, S., Cao, S., Yang, S., Tam, W. L., Zhao, W., Liu, X., Xia, X., Zhang, X., Gu, X., Lv, X., Liu, X., Liu, X., Yang, X., Song, X., Zhang, X., An, Y., Xu, Y., Niu, Y., Yang, Y., Li, Y., Bai, Y., Dong, Y., Qi, Z., Wang, Z., Yang, Z., Du, Z., Hou, Z., Wang, Z.(2024). ChatGLM: A family of large language models from GLM-130B to GLM-4 all tools. 
arXiv preprint https:\/\/doi.org\/10.48550\/arXiv.2406.12793","DOI":"10.48550\/arXiv.2406.12793"},{"key":"6969_CR52","unstructured":"Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., & Artzi, Y. (2020). BERTScore: Evaluating text generation with BERT. In International conference on learning representations"},{"key":"6969_CR53","unstructured":"Zhang, T., Wang, X., Zhou, D., Schuurmans, D., Gonzalez, J.E., & Tempera. (2023). Test-time prompt editing via reinforcement learning. In The eleventh international conference on learning representations"},{"key":"6969_CR54","unstructured":"Zhao, X., Yang, X., Pang, T., Du, C., Li, L., Wang, Y.-X., & Wang, W. Y. (2024). Weak-to-strong jailbreaking on large language models. In ICML 2024 next generation of AI safety workshop"},{"key":"6969_CR55","unstructured":"Zhou, Y., Muresanu, A. I., Han, Z., Paster, K., Pitis, S., Chan, H., & Ba, J. (2023). Large language models are human-level prompt engineers. In The eleventh international conference on learning representations"},{"key":"6969_CR56","unstructured":"Zhou, Y., Shen, J., & Cheng, Y. (2025). Weak to strong generalization for large language models with multi-capabilities. 
In The thirteenth international conference on learning representations"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06969-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-025-06969-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06969-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T12:57:51Z","timestamp":1772456271000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-025-06969-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":55,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["6969"],"URL":"https:\/\/doi.org\/10.1007\/s10994-025-06969-w","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]},"assertion":[{"value":"1 June 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 September 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 December 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 March 2026","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no 
Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"47"}}