{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T20:38:06Z","timestamp":1757623086403,"version":"3.44.0"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"9","license":[{"start":{"date-parts":[[2025,8,14]],"date-time":"2025-08-14T00:00:00Z","timestamp":1755129600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,14]],"date-time":"2025-08-14T00:00:00Z","timestamp":1755129600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s10994-025-06833-x","type":"journal-article","created":{"date-parts":[[2025,8,14]],"date-time":"2025-08-14T19:48:05Z","timestamp":1755200885000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Raising the numbers: multi-generation adversarial attack and frequency-based defense for heightened NLP security"],"prefix":"10.1007","volume":"114","author":[{"given":"Salim","family":"Khemis","sequence":"first","affiliation":[]},{"given":"Yacine","family":"Amara","sequence":"additional","affiliation":[]},{"given":"Mohamed Akrem","family":"Benatia","sequence":"additional","affiliation":[]},{"given":"Ishak","family":"Messalti","sequence":"additional","affiliation":[]},{"given":"Mohammed Elamin Ilyas","family":"Khanous","sequence":"additional","affiliation":[]},{"given":"Bernard","family":"De Baets","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,14]]},"reference":[{"issue":"8","key":"6833_CR1","doi-asserted-by":"publisher","first-page":"5789","DOI":"10.1007\/s10462-021-09958-2","volume":"54","author":"FA Acheampong","year":"2021","unstructured":"Acheampong, F. A., Nunoo-Mensah, H., & Chen, W. (2021). Transformer models for text-based emotion detection: A review of Bert-based approaches. Artificial Intelligence Review, 54(8), 5789\u20135829.","journal-title":"Artificial Intelligence Review"},{"key":"6833_CR2","doi-asserted-by":"crossref","unstructured":"Alzantot, M., Sharma, Y., Elgohary, A., Ho, B., Srivastava, M. B., & Chang, K.-.W. (2018). Generating natural language adversarial examples. In 2018 Conference on empirical methods in natural language processing (EMNLP) (pp. 2890\u20132896). ISBN 978-1-948087-84-1.","DOI":"10.18653\/v1\/D18-1316"},{"key":"6833_CR3","unstructured":"Anderson, M., Bartolo, A., Tandon, P., Bartolo, T. (2017). Crafting adversarial attacks on recurrent neural networks. https:\/\/api.semanticscholar.org\/CorpusID:34561505."},{"key":"6833_CR4","doi-asserted-by":"publisher","unstructured":"Bao, R., Wang, J., & Zhao, H. (2021). Defending pre-trained language models from adversarial word substitution without performance sacrifice. In Findings of the association for computational linguistics: ACL-IJCNLP 2021. https:\/\/doi.org\/10.18653\/v1\/2021.findings-acl.287. https:\/\/aclanthology.org\/2021.findings-acl.287.","DOI":"10.18653\/v1\/2021.findings-acl.287"},{"key":"6833_CR6","doi-asserted-by":"crossref","unstructured":"Biggio, B., Corona, I., Maiorca, D., Nelson, B., \u0160rndi\u0107, N., Laskov, P., Giacinto, G., & Roli, F. (2013). 
Evasion attacks against machine learning at test time. In Machine learning and knowledge discovery in databases (pp. 387\u2013402). Berlin: Springer.","DOI":"10.1007\/978-3-642-40994-3_25"},{"key":"6833_CR5","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1016\/j.patcog.2018.07.023","volume":"84","author":"B Biggio","year":"2018","unstructured":"Biggio, B., & Roli, F. (2018). Wild patterns: Ten years after the rise of adversarial machine learning. Pattern Recognition, 84, 317\u2013331. https:\/\/doi.org\/10.1016\/j.patcog.2018.07.023. ISSN 0031-3203.","journal-title":"Pattern Recognition"},{"key":"6833_CR7","doi-asserted-by":"crossref","unstructured":"Cer, D., Yang, Y., Kong, S., Hua, N., Limtiaco, N., St. John, R., Constant, N., Guajardo-Cespedes, M., Yuan, S., Tar, C., et\u00a0al. (2018). Universal sentence encoder for English. In Proceedings of the 2018 conference on empirical methods in natural language processing: system demonstrations (pp. 169\u2013174).","DOI":"10.18653\/v1\/D18-2029"},{"key":"6833_CR8","doi-asserted-by":"crossref","unstructured":"Ebrahimi, J., Rao, A., Lowd, D., & Dou, D. (2018). Hotflip: White-box adversarial examples for text classification. In Proceedings of the 56th annual meeting of the association for computational linguistics (Vol. 2, pp. 31\u201336). ISBN 978-1-948087-34-6.","DOI":"10.18653\/v1\/P18-2006"},{"key":"6833_CR9","doi-asserted-by":"crossref","unstructured":"Fu, X., Gu, Z., Han, W., Qian, Y., Wang, B. (2021). Exploring security vulnerabilities of deep learning models by adversarial attacksWireless Communications and Mobile Computing, 2021(1), 9969867.","DOI":"10.1155\/2021\/9969867"},{"key":"6833_CR10","doi-asserted-by":"crossref","unstructured":"Gao, J., Lanchantin, J., Soffa, M. L., & Qi, Y. (2018). Black-box generation of adversarial text sequences to evade deep learning classifiers. In 2018 IEEE security and privacy workshops (SPW) (pp. 50\u201356). IEEE.","DOI":"10.1109\/SPW.2018.00016"},{"key":"6833_CR11","doi-asserted-by":"crossref","unstructured":"Garg, S., & Ramakrishnan, G. (2020). Bae: Bert-based adversarial examples for text classification. In Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP) (pp. 6174\u20136181). ISBN 978-1-952148-60-6.","DOI":"10.18653\/v1\/2020.emnlp-main.498"},{"key":"6833_CR12","unstructured":"Gong, C., He, D., Tan, Xu., Qin, T., Wang, L., & Liu, T.-Y. (2018) Frage: Frequency-agnostic word representation. In Advances in neural information processing systems (Vol. 31)."},{"key":"6833_CR13","unstructured":"Goodfellow, I. J., Shlens, J., & Szegedy, C. (2014). Explaining and harnessing adversarial examples. CoRR, abs\/1412.6572. https:\/\/api.semanticscholar.org\/CorpusID:6706414."},{"key":"6833_CR14","doi-asserted-by":"publisher","DOI":"10.1145\/3593042","author":"S Goyal","year":"2023","unstructured":"Goyal, S., Doddapaneni, S., Khapra, M. M., & Ravindran, B. (2023). A survey of adversarial defenses and robustness in nlp. ACM Computing Surveys. https:\/\/doi.org\/10.1145\/3593042","journal-title":"ACM Computing Surveys"},{"key":"6833_CR15","doi-asserted-by":"crossref","unstructured":"Harbecke, D., & Alt, C. (2020). Considering likelihood in nlp classification explanations with occlusion and language modeling. In 58th Annual meeting of the association for computational linguistics (ACL 2020): Student research workshop (pp. 111\u2013117). Association for Computational Linguistics. 
ISBN 978-1-952148-03-3.","DOI":"10.18653\/v1\/2020.acl-srw.16"},{"key":"6833_CR16","doi-asserted-by":"crossref","unstructured":"Huber, L., K\u00fchn, M. A., Mosca, E., & Groh, G. (2022). Detecting word-level adversarial text attacks via Shapley additive explanations. In Proceedings of the 7th workshop on representation learning for NLP (pp. 156\u2013166).","DOI":"10.18653\/v1\/2022.repl4nlp-1.16"},{"key":"6833_CR17","doi-asserted-by":"publisher","unstructured":"Ivgi, M., & Berant, J. (2021). Achieving model robustness through discrete adversarial training (pp. 1529\u20131544). https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.115.","DOI":"10.18653\/v1\/2021.emnlp-main.115"},{"key":"6833_CR18","doi-asserted-by":"publisher","unstructured":"Jia, R., Raghunathan, A., G\u00f6ksel, K., & Liang, P. (2019). Certified robustness to adversarial word substitutions. In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), Hong Kong, China, November (pp. 4129\u20134142). https:\/\/doi.org\/10.18653\/v1\/D19-1423. https:\/\/aclanthology.org\/D19-1423.","DOI":"10.18653\/v1\/D19-1423"},{"key":"6833_CR19","doi-asserted-by":"crossref","unstructured":"Jin, D., Jin, Z., Zhou, J. T., & Szolovits, P. (2020). Is Bert really robust? A strong baseline for natural language attack on text classification and entailment. In Proceedings of the AAAI conference on artificial intelligence (Vol.\u00a034, pp. 8018\u20138025).","DOI":"10.1609\/aaai.v34i05.6311"},{"key":"6833_CR20","doi-asserted-by":"crossref","unstructured":"Khassanov, Y., Zeng, Z., Pham, V. T., Xu, H., & Chng, E. S. (2019). Enriching rare word representations in neural language models by embedding matrix augmentation. arXiv preprint arXiv:1904.03799.","DOI":"10.21437\/Interspeech.2019-1858"},{"key":"6833_CR21","doi-asserted-by":"crossref","unstructured":"Li, D., Zhang, Y., Peng, H., Chen, L., Brockett, C., Sun, M.-T., & Dolan, B. (2021). Contextualized perturbation for textual adversarial attack (pp. 5053\u20135069). ISBN 978-1-954085-46-6.","DOI":"10.18653\/v1\/2021.naacl-main.400"},{"key":"6833_CR22","doi-asserted-by":"publisher","unstructured":"Li, J., Ji, S., Du, T., Li, B., & Wang, T. (2019). Textbugger: Generating adversarial text against real-world applications. In 26th Annual network and distributed system security symposium (NDSS 2019). ISBN 978-1-891562-55-6. https:\/\/doi.org\/10.14722\/ndss.2019.23138.","DOI":"10.14722\/ndss.2019.23138"},{"key":"6833_CR23","doi-asserted-by":"crossref","unstructured":"Li, L., Ma, R., Guo, Q., Xue, X., & Qiu, X. (2020). Bert-attack: Adversarial attack against BERT using BERT. In Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP) (pp. 6193\u20136202). ISBN 978-1-952148-60-6.","DOI":"10.18653\/v1\/2020.emnlp-main.500"},{"key":"6833_CR24","unstructured":"Liu, H., Yu, J., Li, S., Ma, J., & Ji, B. (2022). A context-aware approach for textual adversarial attack through probability difference guided beam search. CoRR, abs\/2208.08029."},{"key":"6833_CR25","doi-asserted-by":"publisher","unstructured":"Liu, R., Lam, K-Y., Zhou, W., Wu, S., Zhao, J., Hu, D., & Gong, M. (2025). STBA: Towards evaluating the robustness of DNNs for query-limited black-box scenario. IEEE Transactions on Multimedia, 27, 2666\u20132681. 
https:\/\/doi.org\/10.1109\/TMM.2025.3535328","DOI":"10.1109\/TMM.2025.3535328"},{"key":"6833_CR26","doi-asserted-by":"crossref","unstructured":"Liu, R., Zhang, J., Li, H., Zhang, J., Wang, Y., & Zhou, W. (2023). Aflow: Developing adversarial examples under extremely noise-limited settings. In International conference on information and communications security (pp. 502\u2013518). Springer.","DOI":"10.1007\/978-981-99-7356-9_30"},{"key":"6833_CR27","doi-asserted-by":"publisher","unstructured":"Liu, R., Zhou, W., Zhang, Ti., Chen, K., Zhao, J., & Lam, K-Y. (2024). Boosting black-box attack to deep neural networks with conditional diffusion models. IEEE Transactions on Information Forensics and Security, 19, 5207\u20135219. https:\/\/doi.org\/10.1109\/TIFS.2024.3390609","DOI":"10.1109\/TIFS.2024.3390609"},{"key":"6833_CR28","doi-asserted-by":"crossref","unstructured":"Lu, N., Liu, S., Zhang, Z., Wang, Q., Liu, H., & Tang, K. (2023). Less is more: Understanding word-level textual adversarial attack via n-gram frequency descend. In Proceedings of the  2024 IEEE Conference on Artificial Intelligence (CAI) (pp. 823\u2013830). IEEE Computer Society. https:\/\/ieeexplore.ieee.org\/document\/10605391","DOI":"10.1109\/CAI59869.2024.00155"},{"key":"6833_CR29","doi-asserted-by":"crossref","unstructured":"Ma, X., Jin, R., Paik, J.-Y., & Chung, T.-S. (2018). Large scale text classification with efficient word embedding. In Mobile and wireless technologies 2017: ICMWT 2017 4 (pp. 465\u2013469). Springer.","DOI":"10.1007\/978-981-10-5281-1_51"},{"key":"6833_CR30","doi-asserted-by":"crossref","unstructured":"Maheshwary, R., Maheshwary, S., & Pudi, V. (2021). Generating natural language attacks in a hard label black box setting. In Proceedings of the AAAI conference on artificial intelligence (Vol. 35, pp. 13525\u201313533).","DOI":"10.1609\/aaai.v35i15.17595"},{"key":"6833_CR31","doi-asserted-by":"crossref","unstructured":"Malik, V., Bhat, A., & Modi, A. (2021). Adv-OLM: Generating textual adversaries via olm. In 16th Conference of the European chapter of the Association for Computational Linguistics (EACL 2021) (pp. 841\u2013849). ISBN 978-1-954085-02-2.","DOI":"10.18653\/v1\/2021.eacl-main.71"},{"key":"6833_CR32","doi-asserted-by":"crossref","unstructured":"Minh, D. N., & Luu, A. T. (2022). Textual manifold-based defense against natural language adversarial examples. In Proceedings of the 2022 conference on empirical methods in natural language processing (pp. 6612\u20136625).","DOI":"10.18653\/v1\/2022.emnlp-main.443"},{"key":"6833_CR33","unstructured":"Mkadry, A., Makelov, A., Schmidt, L., Tsipras, D., & Vladu, A. (2017). Towards deep learning models resistant to adversarial attacks. stat, 1050(9)."},{"key":"6833_CR34","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli, S.-M., Fawzi, A., & Frossard, P. (2016). Deepfool: A simple and accurate method to fool deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2574\u20132582).","DOI":"10.1109\/CVPR.2016.282"},{"key":"6833_CR35","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli, S.-M., Fawzi, A., Fawzi, O., & Frossard, P. (2017). Universal adversarial perturbations. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1765\u20131773).","DOI":"10.1109\/CVPR.2017.17"},{"key":"6833_CR36","doi-asserted-by":"crossref","unstructured":"Morris, J. X., Lifland, E., Yoo, J. Y., Grigsby, J., Jin, D., & Qi, Y. (2020). 
Textattack: A framework for adversarial attacks, data augmentation, and adversarial training in nlp. In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations (pp. 119\u2013126). ISBN 978-1-952148-62-0.","DOI":"10.18653\/v1\/2020.emnlp-demos.16"},{"key":"6833_CR37","doi-asserted-by":"publisher","unstructured":"Mosca, E., Agarwal, S., Ram\u00edrez, J. R., & Groh, G. (2022). \u201cthat is a suspicious reaction!\u201d: Interpreting logits variation to detect NLP adversarial attacks. In Proceedings of the 60th annual meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Dublin, Ireland (pp. 7806\u20137816). https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.538. https:\/\/aclanthology.org\/2022.acl-long.538.","DOI":"10.18653\/v1\/2022.acl-long.538"},{"key":"6833_CR38","doi-asserted-by":"publisher","unstructured":"Mozes, M., Stenetorp, P., Kleinberg, B., & Griffin, L. (2021). Frequency-guided word substitutions for detecting textual adversarial examples. In Proceedings of the 16th conference of the European chapter of the association for computational linguistics: Main volume (pp. 171\u2013186). https:\/\/doi.org\/10.18653\/v1\/2021.eacl-main.13. https:\/\/aclanthology.org\/2021.eacl-main.13.","DOI":"10.18653\/v1\/2021.eacl-main.13"},{"key":"6833_CR39","doi-asserted-by":"publisher","unstructured":"Mrk\u0161i\u0107, N., S\u00e9aghdha, D.\u00d3., Thomson, B., Ga\u0161i\u0107, M., Rojas-Barahona, L. M., Su, P.-H., Vandyke, D, Wen, T.-H., & Young, S. (2016). Counter-fitting word vectors to linguistic constraints (pp. 142\u2013148). https:\/\/doi.org\/10.18653\/v1\/N16-1018. https:\/\/aclanthology.org\/N16-1018.","DOI":"10.18653\/v1\/N16-1018"},{"key":"6833_CR40","doi-asserted-by":"crossref","unstructured":"Papernot, N., McDaniel, P., Jha, S., Fredrikson, M., Celik, Z. B., & Swami, A. (2016). The limitations of deep learning in adversarial settings. In 2016 IEEE European symposium on security and privacy (EuroS &P) (pp. 372\u2013387). IEEE.","DOI":"10.1109\/EuroSP.2016.36"},{"issue":"12","key":"6833_CR41","doi-asserted-by":"publisher","first-page":"4539","DOI":"10.1007\/s10994-022-06263-z","volume":"111","author":"M Qaraei","year":"2022","unstructured":"Qaraei, M., & Babbar, R. (2022). Adversarial examples for extreme multilabel text classification. Machine Learning, 111(12), 4539\u20134563. https:\/\/doi.org\/10.1007\/s10994-022-06263-z","journal-title":"Machine Learning"},{"key":"6833_CR42","doi-asserted-by":"crossref","unstructured":"Ren, S., Deng, Y., He, K., & Che, W. (2019). Generating natural language adversarial examples through probability weighted word saliency. In Proceedings of the 57th annual meeting of the association for computational linguistics (pp. 1085\u20131097).","DOI":"10.18653\/v1\/P19-1103"},{"issue":"1\u20132","key":"6833_CR43","doi-asserted-by":"publisher","first-page":"141","DOI":"10.1504\/ijics.2024.142697","volume":"25","author":"K Salim","year":"2024","unstructured":"Salim, K., Yacine, A., & Akrem, B. M. (2024). Improving greedy adversarial attacks on text classification. International Journal of Computer Science and Information Security, 25(1\u20132), 141\u2013166. https:\/\/doi.org\/10.1504\/ijics.2024.142697. ISSN 1744-1765.","journal-title":"International Journal of Computer Science and Information Security"},{"key":"6833_CR44","doi-asserted-by":"crossref","unstructured":"Shi, Z., & Huang, M. (2019). Robustness to modification with shared words in paraphrase identification. 
CoRR, abs\/1909.02560.","DOI":"10.18653\/v1\/2020.findings-emnlp.16"},{"key":"6833_CR45","unstructured":"Valentini, F. T., Fernandez\u00a0Slezak, D., & Altszyler\u00a0Lemcovich, E. J. (2022). The dependence on frequency of word embedding similarity measures."},{"key":"6833_CR46","unstructured":"Wang, X., Hao, J., Yang, Y., & He, K. (2021a). Natural language adversarial defense through synonym encoding. In Uncertainty in artificial intelligence (pp. 823\u2013833). PMLR."},{"key":"6833_CR47","doi-asserted-by":"crossref","unstructured":"Wang, X., Yang, Y., Deng, Y., & He, K. (2021b). Adversarial training with fast gradient projection method against synonym substitution based text attacks. In Proceedings of the AAAI conference on artificial intelligence (Vol. 35, pp. 13997\u201314005).","DOI":"10.1609\/aaai.v35i16.17648"},{"key":"6833_CR48","unstructured":"Wang, X., Yifeng, X., & He, K. (2022). Detecting textual adversarial examples through randomized substitution and vote. In Uncertainty in artificial intelligence (pp. 2056\u20132065). PMLR."},{"key":"6833_CR49","doi-asserted-by":"crossref","unstructured":"Wang, Z., & Wang, H. (2020). Defense of word-level adversarial attacks via random substitution encoding. In Knowledge science, engineering and management: 13th international conference, KSEM 2020, Hangzhou, China, August 28\u201330, proceedings, part II 13 (pp. 312\u2013324). Springer.","DOI":"10.1007\/978-3-030-55393-7_28"},{"key":"6833_CR50","doi-asserted-by":"crossref","unstructured":"Yang, X., Liu, W., Tao, D., & Liu, W. (2021). Besa: Bert-based simulated annealing for adversarial text attacks. In IJCAI (pp. 3293\u20133299).","DOI":"10.24963\/ijcai.2021\/453"},{"key":"6833_CR51","doi-asserted-by":"publisher","unstructured":"Ye, M., Gong, C., & Liu, Q. (2020). SAFER: A structure-free approach for certified robustness to adversarial word substitutions. In Proceedings of the 58th annual meeting of the association for computational linguistics, July (pp. 3465\u20133475). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.317. https:\/\/aclanthology.org\/2020.acl-main.317.","DOI":"10.18653\/v1\/2020.acl-main.317"},{"key":"6833_CR52","doi-asserted-by":"crossref","unstructured":"Yoo, J. Y., & Qi, Y. (2021). Towards improving adversarial training of nlp models. In Findings of the association for computational linguistics, emnlp 2021 (pp. 945\u2013956). Association for Computational Linguistics. ISBN 978-1-955917-10-0.","DOI":"10.18653\/v1\/2021.findings-emnlp.81"},{"key":"6833_CR53","unstructured":"Yuan, L., Zhang, Y., Chen, Y., & Wei, W. (2021). Bridge the gap between cv and nlp! A gradient-based textual adversarial attack framework. ArXiv, abs\/2110.15317. https:\/\/api.semanticscholar.org\/CorpusID:240070451."},{"key":"6833_CR54","doi-asserted-by":"publisher","unstructured":"Zang, Y., Qi, F., Yang, C., Liu, Z., Zhang, M., Liu, Q., & Sun, M. (2020). Word-level textual adversarial attacking as combinatorial optimization. In Proceedings of the 58th annual meeting of the association for computational linguistics (pp. 6066\u20136080). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.540. https:\/\/aclanthology.org\/2020.acl-main.540.","DOI":"10.18653\/v1\/2020.acl-main.540"},{"key":"6833_CR56","doi-asserted-by":"publisher","unstructured":"Zhou, Y., Jiang, J.-Y., Chang, K.-W., & Wang, W. (2019). Learning to discriminate perturbations for blocking adversarial attacks in text classification. 
In Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing, EMNLP-IJCNLP (pp. 4903\u20134912). https:\/\/doi.org\/10.18653\/v1\/D19-1496.","DOI":"10.18653\/v1\/D19-1496"},{"key":"6833_CR55","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Zheng, X., Hsieh, C.-J., Chang, K.-W., & Huan, X. (2021). Defense against synonym substitution-based adversarial attacks via Dirichlet neighborhood ensemble. In Association for computational linguistics (ACL).","DOI":"10.18653\/v1\/2021.acl-long.426"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06833-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-025-06833-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06833-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T02:02:08Z","timestamp":1757383328000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-025-06833-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,14]]},"references-count":56,"journal-issue":{"issue":"9","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["6833"],"URL":"https:\/\/doi.org\/10.1007\/s10994-025-06833-x","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"type":"print","value":"0885-6125"},{"type":"electronic","value":"1573-0565"}],"subject":[],"published":{"date-parts":[[2025,8,14]]},"assertion":[{"value":"18 December 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 July 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 July 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 August 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"207"}}
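
The record above is a Crossref REST API "work" message: an envelope with "status" and "message-type" wrapping a "message" payload that carries the bibliographic fields and the reference list. A minimal sketch of how such a record can be fetched and its reference list walked, assuming only the public endpoint at api.crossref.org (the field names below are taken from the record itself; the User-Agent contact is a placeholder):

```python
import json
import urllib.request

# Fetch the Crossref work record for this article by DOI.
# Crossref asks heavier users to identify themselves (e.g. a mailto
# in the User-Agent); the address below is a hypothetical placeholder.
DOI = "10.1007/s10994-025-06833-x"
req = urllib.request.Request(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "example-script (mailto:you@example.org)"},
)
with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

msg = record["message"]
print(msg["title"][0])
print(msg["container-title"][0], "vol.", msg["volume"], "issue", msg.get("issue"))

# Each reference entry has a "key"; a "DOI" field is present only when
# Crossref matched the citation, so fall back to the free-text
# "unstructured" string otherwise.
for ref in msg.get("reference", []):
    label = ref.get("DOI") or ref.get("unstructured", "")[:80]
    print(ref["key"], "->", label)
```

Note that the "reference" entries mix structured fields ("DOI", "volume", "first-page") with the free-text "unstructured" citation, which is why the sketch falls back from one to the other; "reference-count" and "references-count" (both 56 here) describe the same list.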