{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T20:45:20Z","timestamp":1764103520196,"version":"3.37.3"},"reference-count":60,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2024,8,7]],"date-time":"2024-08-07T00:00:00Z","timestamp":1722988800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2024,8,7]],"date-time":"2024-08-07T00:00:00Z","timestamp":1722988800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100013254","name":"National College Students Innovation and Entrepreneurship Training Program","doi-asserted-by":"publisher","award":["202310293152E"],"award-info":[{"award-number":["202310293152E"]}],"id":[{"id":"10.13039\/501100013254","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. 
Syst."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s40747-024-01595-w","type":"journal-article","created":{"date-parts":[[2024,8,7]],"date-time":"2024-08-07T04:03:51Z","timestamp":1723003431000},"page":"7863-7875","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["A novel iteration scheme with conjugate gradient for faster pruning on transformer models"],"prefix":"10.1007","volume":"10","author":[{"given":"Jun","family":"Li","sequence":"first","affiliation":[]},{"given":"Yuchen","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7045-3182","authenticated-orcid":false,"given":"Kexue","family":"Sun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,7]]},"reference":[{"key":"1595_CR1","doi-asserted-by":"publisher","first-page":"604","DOI":"10.1109\/TNNLS.2020.2979670","volume":"32","author":"DW Otter","year":"2020","unstructured":"Otter DW, Medina JR, Kalita JK (2020) A survey of the usages of deep learning for natural language processing. IEEE Trans Neural Netw Learn Syst 32:604\u2013624","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"1595_CR2","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Adv Neural Inf Process Syst 30"},{"key":"1595_CR3","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint http:\/\/arxiv.org\/abs\/1810.04805"},{"key":"1595_CR4","first-page":"7267","volume":"34","author":"J Zhang","year":"2021","unstructured":"Zhang J, Chang W-C, Yu H-F, Dhillon I (2021) Fast multi-resolution transformer fine-tuning for extreme multi-label text classification. 
Adv Neural Inf Process Syst 34:7267\u20137280","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR5","doi-asserted-by":"crossref","unstructured":"Jiang T, Wang D, Sun L, Yang H, Zhao Z, Zhuang F (2021) LightXML: Transformer with dynamic negative sampling for high-performance extreme multi-label text classification. In: Proceedings of the AAAI conference on artificial intelligence, pp 7987\u20137994","DOI":"10.1609\/aaai.v35i9.16974"},{"key":"1595_CR6","doi-asserted-by":"publisher","DOI":"10.1002\/cpe.6486","volume":"34","author":"M Tezgider","year":"2022","unstructured":"Tezgider M, Yildiz B, Aydin G (2022) Text classification using improved bidirectional transformer. Concurr Comput Pract Exp 34:e6486","journal-title":"Concurr Comput Pract Exp"},{"key":"1595_CR7","doi-asserted-by":"publisher","first-page":"6232","DOI":"10.1109\/TCYB.2021.3050508","volume":"52","author":"T Zhang","year":"2021","unstructured":"Zhang T, Gong X, Chen CP (2021) BMT-Net: broad multitask transformer network for sentiment analysis. IEEE Trans Cybern 52:6232\u20136243","journal-title":"IEEE Trans Cybern"},{"key":"1595_CR8","doi-asserted-by":"publisher","first-page":"58","DOI":"10.1016\/j.future.2020.06.050","volume":"113","author":"U Naseem","year":"2020","unstructured":"Naseem U, Razzak I, Musial K, Imran M (2020) Transformer based deep intelligent contextual embedding for twitter sentiment analysis. Future Gener Comput Syst 113:58\u201369","journal-title":"Future Gener Comput Syst"},{"key":"1595_CR9","doi-asserted-by":"crossref","unstructured":"Chang Y, Kong L, Jia K, Meng Q (2021) Chinese named entity recognition method based on BERT. 
In: 2021 IEEE international conference on data science and computer application (ICDSCA), (IEEE 2021), pp 294\u2013299","DOI":"10.1109\/ICDSCA53499.2021.9650256"},{"key":"1595_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2021.115049","volume":"182","author":"G Aras","year":"2021","unstructured":"Aras G, Makaro\u011flu D, Demir S, Cakir A (2021) An evaluation of recent neural sequence tagging models in Turkish named entity recognition. Expert Syst Appl 182:115049","journal-title":"Expert Syst Appl"},{"key":"1595_CR11","doi-asserted-by":"publisher","first-page":"747","DOI":"10.1080\/13658816.2022.2133125","volume":"37","author":"C Berragan","year":"2023","unstructured":"Berragan C, Singleton A, Calafiore A, Morley J (2023) Transformer based named entity recognition for place name extraction from unstructured text. Int J Geogr Inf Sci 37:747\u2013766","journal-title":"Int J Geogr Inf Sci"},{"key":"1595_CR12","doi-asserted-by":"crossref","unstructured":"Chen H, Wang Y, Guo T, Xu C, Deng Y, Liu Z, Ma S, Xu C, Xu C, Gao W (2021) Pre-trained image processing transformer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 12299\u201312310","DOI":"10.1109\/CVPR46437.2021.01212"},{"key":"1595_CR13","doi-asserted-by":"publisher","first-page":"87","DOI":"10.1109\/TPAMI.2022.3152247","volume":"45","author":"K Han","year":"2022","unstructured":"Han K, Wang Y, Chen H, Chen X, Guo J, Liu Z, Tang Y, Xiao A, Xu C, Xu Y (2022) A survey on vision transformer. IEEE Trans Pattern Anal Mach Intell 45:87\u2013110","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1595_CR14","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: Hierarchical vision transformer using shifted windows. 
In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1595_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3505244","volume":"54","author":"S Khan","year":"2022","unstructured":"Khan S, Naseer M, Hayat M, Zamir SW, Khan FS, Shah M (2022) Transformers in vision: a survey. ACM Comput Surv (CSUR) 54:1\u201341","journal-title":"ACM Comput Surv (CSUR)"},{"key":"1595_CR16","first-page":"9361","volume":"35","author":"S Kim","year":"2022","unstructured":"Kim S, Gholami A, Shaw A, Lee N, Mangalam K, Malik J, Mahoney MW, Keutzer K (2022) Squeezeformer: an efficient transformer for automatic speech recognition. Adv Neural Inf Process Syst 35:9361\u20139373","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR17","doi-asserted-by":"crossref","unstructured":"Wang Y, Mohamed A, Le D, Liu C, Xiao A, Mahadeokar J, Huang H, Tjandra A, Zhang X, Zhang F (2020) Transformer-based acoustic modeling for hybrid speech recognition. In: ICASSP 2020\u20132020 IEEE international conference on acoustics, speech and signal processing (ICASSP), (IEEE 2020), pp 6874\u20136878","DOI":"10.1109\/ICASSP40776.2020.9054345"},{"key":"1595_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3641289","volume":"15","author":"Y Chang","year":"2023","unstructured":"Chang Y, Wang X, Wang J, Wu Y, Yang L, Zhu K, Chen H, Yi X, Wang C, Wang Y (2023) A survey on evaluation of large language models. ACM Trans Intell Syst Technol 15:1\u201345","journal-title":"ACM Trans Intell Syst Technol"},{"key":"1595_CR19","doi-asserted-by":"publisher","first-page":"126","DOI":"10.1109\/MSP.2017.2765695","volume":"35","author":"Y Cheng","year":"2018","unstructured":"Cheng Y, Wang D, Zhou P, Zhang T (2018) Model compression and acceleration for deep neural networks: the principles, progress, and challenges. 
IEEE Signal Process Mag 35:126\u2013136","journal-title":"IEEE Signal Process Mag"},{"key":"1595_CR20","doi-asserted-by":"publisher","first-page":"127468","DOI":"10.1016\/j.neucom.2024.127468","volume":"582","author":"M Farina","year":"2024","unstructured":"Farina M, Ahmad U, Taha A, Younes H, Mesbah Y, Yu X, Pedrycz W (2024) Sparsity in transformers: a systematic literature review. Neurocomputing 582:127468","journal-title":"Neurocomputing"},{"key":"1595_CR21","doi-asserted-by":"publisher","first-page":"2900","DOI":"10.1109\/TPAMI.2023.3334614","volume":"46","author":"Y He","year":"2023","unstructured":"He Y, Xiao L (2023) Structured pruning for deep convolutional neural networks: a survey. IEEE Trans Pattern Anal Mach Intell 46:2900\u20132919","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1595_CR22","first-page":"1","volume":"22","author":"T Hoefler","year":"2021","unstructured":"Hoefler T, Alistarh D, Ben-Nun T, Dryden N, Peste A (2021) Sparsity in deep learning: pruning and growth for efficient inference and training in neural networks. J Mach Learn Res 22:1\u2013124","journal-title":"J Mach Learn Res"},{"key":"1595_CR23","doi-asserted-by":"publisher","first-page":"e6","DOI":"10.4108\/airo.v2i1.3392","volume":"2","author":"A Durojaye","year":"2023","unstructured":"Durojaye A, Amin K, Abdullah N, Moshayedi A (2023) Immersive horizons: exploring the transformative power of virtual reality across economic sectors. EAI Endors Trans AI Robot 2:e6","journal-title":"EAI Endors Trans AI Robot"},{"key":"1595_CR24","unstructured":"Sun M, Liu Z, Bair A, Kolter JZ (2023) A simple and effective pruning approach for large language models. In: The twelfth international conference on learning representations"},{"key":"1595_CR25","first-page":"21702","volume":"36","author":"X Ma","year":"2023","unstructured":"Ma X, Fang G, Wang X (2023) LLM-pruner: on the structural pruning of large language models. 
Adv Neural Inf Process Syst 36:21702\u201321720","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR26","doi-asserted-by":"publisher","first-page":"297","DOI":"10.1016\/j.neucom.2021.04.141","volume":"485","author":"D Liu","year":"2022","unstructured":"Liu D, Kong H, Luo X, Liu W, Subramaniam R (2022) Bringing AI to edge: from deep learning\u2019s perspective. Neurocomputing 485:297\u2013320","journal-title":"Neurocomputing"},{"key":"1595_CR27","doi-asserted-by":"publisher","first-page":"1905","DOI":"10.1007\/s10462-022-10213-5","volume":"56","author":"S Cong","year":"2023","unstructured":"Cong S, Zhou Y (2023) A review of convolutional neural network architectures and their optimizations. Artif Intell Rev 56:1905\u20131969","journal-title":"Artif Intell Rev"},{"key":"1595_CR28","doi-asserted-by":"publisher","first-page":"102990","DOI":"10.1016\/j.sysarc.2023.102990","volume":"144","author":"KT Chitty-Venkata","year":"2023","unstructured":"Chitty-Venkata KT, Mittal S, Emani M, Vishwanath V, Somani AK (2023) A survey of techniques for optimizing transformer inference. J Syst Archit 144:102990","journal-title":"J Syst Archit"},{"key":"1595_CR29","doi-asserted-by":"crossref","unstructured":"Fang G, Ma X, Song M, Michael B, Wang X (2023) DepGraph: towards any structural pruning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 16091\u201316101","DOI":"10.1109\/CVPR52729.2023.01544"},{"key":"1595_CR30","first-page":"24101","volume":"35","author":"W Kwon","year":"2022","unstructured":"Kwon W, Kim S, Mahoney MW, Hassoun J, Keutzer K, Gholami A (2022) A fast post-training pruning framework for transformers. Adv Neural Inf Process Syst 35:24101\u201324116","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR31","first-page":"16716","volume":"36","author":"G Fang","year":"2024","unstructured":"Fang G, Ma X, Wang X (2024) Structural pruning for diffusion models. 
Adv Neural Inf Process Syst 36:16716\u201316728","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR32","doi-asserted-by":"publisher","first-page":"4038","DOI":"10.1109\/TCAD.2023.3273992","volume":"42","author":"S Tuli","year":"2023","unstructured":"Tuli S, Jha NK (2023) AccelTran: a sparsity-aware accelerator for dynamic inference with transformers. IEEE Trans Comput-Aid Design Integr Circuits Syst 42:4038\u20134051","journal-title":"IEEE Trans Comput-Aid Design Integr Circuits Syst"},{"key":"1595_CR33","unstructured":"Park S, Choi H, Kang U (2023) Accurate retraining-free pruning for pretrained encoder-based language models. In: The twelfth international conference on learning representations"},{"key":"1595_CR34","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1093\/comjnl\/7.2.149","volume":"7","author":"R Fletcher","year":"1964","unstructured":"Fletcher R, Reeves CM (1964) Function minimization by conjugate gradients. Comput J 7:149\u2013154","journal-title":"Comput J"},{"key":"1595_CR35","first-page":"585","volume":"33","author":"W Kim","year":"2020","unstructured":"Kim W, Kim S, Park M, Jeon G (2020) Neuron merging: compensating for pruned neurons. Adv Neural Inf Process Syst 33:585\u2013595","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR36","first-page":"7950","volume":"32","author":"R Banner","year":"2019","unstructured":"Banner R, Nahshan Y, Soudry D (2019) Post training 4-bit quantization of convolutional networks for rapid-deployment. Adv Neural Inf Proces Syst 32:7950\u20137958","journal-title":"Adv Neural Inf Proces Syst"},{"key":"1595_CR37","doi-asserted-by":"crossref","unstructured":"Lin S, Xie H, Wang B, Yu K, Chang X, Liang X, Wang G (2022) Knowledge distillation via the target-aware transformer. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 10915\u201310924","DOI":"10.1109\/CVPR52688.2022.01064"},{"key":"1595_CR38","unstructured":"Liu R, Yang K, Roitberg A, Zhang J, Peng K, Liu H, Stiefelhagen R (2022) TransKD: transformer knowledge distillation for efficient semantic segmentation. arXiv preprint http:\/\/arxiv.org\/abs\/2202.13393"},{"key":"1595_CR39","unstructured":"Kim S, Gholami A, Yao Z, Mahoney MW, Keutzer K (2021) I-BERT: integer-only BERT quantization. In: International conference on machine learning, (PMLR 2021), pp 5506\u20135518"},{"key":"1595_CR40","first-page":"34451","volume":"35","author":"Y Li","year":"2022","unstructured":"Li Y, Xu S, Zhang B, Cao X, Gao P, Guo G (2022) Q-VIT: ACCURATE and fully quantized low-bit vision transformer. Adv Neural Inf Process Syst 35:34451\u201334463","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR41","unstructured":"Wu Z, Liu Z, Lin J, Lin Y, Han S (2019) Lite transformer with long-short range attention. In: International conference on learning representations"},{"key":"1595_CR42","doi-asserted-by":"crossref","unstructured":"Wang H, Wu Z, Liu Z, Cai H, Zhu L, Gan C, Han S (2020) HAT: hardware-aware transformers for efficient natural language processing. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp 7675\u20137688","DOI":"10.18653\/v1\/2020.acl-main.686"},{"key":"1595_CR43","first-page":"1135","volume":"28","author":"S Han","year":"2015","unstructured":"Han S, Pool J, Tran J, Dally W (2015) Learning both weights and connections for efficient neural network. Adv Neural Inf Process Syst 28:1135\u20131143","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR44","unstructured":"Frankle J, Carbin M (2018) The lottery ticket hypothesis: finding sparse, trainable neural networks. 
arXiv preprint http:\/\/arxiv.org\/abs\/1803.03635"},{"key":"1595_CR45","first-page":"4475","volume":"35","author":"E Frantar","year":"2022","unstructured":"Frantar E, Alistarh D (2022) Optimal brain compression: a framework for accurate post-training quantization and pruning. Adv Neural Inf Process Syst 35:4475\u20134488","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR46","first-page":"18098","volume":"33","author":"SP Singh","year":"2020","unstructured":"Singh SP, Alistarh D (2020) Woodfisher: efficient second-order approximation for neural network compression. Adv Neural Inf Process Syst 33:18098\u201318109","journal-title":"Adv Neural Inf Process Syst"},{"key":"1595_CR47","doi-asserted-by":"crossref","unstructured":"Molchanov P, Mallya A, Tyree S, Frosio I, Kautz J (2019) Importance estimation for neural network pruning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11264\u201311272","DOI":"10.1109\/CVPR.2019.01152"},{"key":"1595_CR48","doi-asserted-by":"crossref","unstructured":"Lazarevich I, Kozlov A, Malinin N (2021) Post-training deep neural network pruning via layer-wise calibration. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 798\u2013805","DOI":"10.1109\/ICCVW54120.2021.00094"},{"key":"1595_CR49","doi-asserted-by":"crossref","unstructured":"Shi Y, Bai S, Wei X, Gong R, Yang J (2023) Lossy and lossless (L2) post-training model size compression. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 17546\u201317556","DOI":"10.1109\/ICCV51070.2023.01609"},{"key":"1595_CR50","unstructured":"Wan A, Hao H, Patnaik K, Xu Y, Hadad O, G\u00fcera D, Ren Z, Shan Q (2023) UPSCALE: unconstrained channel pruning. 
In: International conference on machine learning, (PMLR 2023), pp 35384\u201335412"},{"key":"1595_CR51","doi-asserted-by":"crossref","unstructured":"Cai L, An Z, Yang C, Yan Y, Xu Y (2022) Prior gradient mask guided pruning-aware fine-tuning. In: Proceedings of the AAAI conference on artificial intelligence, pp 140\u2013148","DOI":"10.1609\/aaai.v36i1.19888"},{"key":"1595_CR52","doi-asserted-by":"crossref","unstructured":"Kurtic E, Campos D, Nguyen T, Frantar E, Kurtz M, Fineran B, Goin M, Alistarh D (2022) The optimal BERT surgeon: scalable and accurate second-order pruning for large language models. arXiv preprint http:\/\/arxiv.org\/abs\/2203.07259","DOI":"10.18653\/v1\/2022.emnlp-main.279"},{"key":"1595_CR53","doi-asserted-by":"crossref","unstructured":"Lagunas F, Charlaix E, Sanh V, Rush AM (2021) Block pruning for faster transformers. arXiv preprint http:\/\/arxiv.org\/abs\/2109.04838","DOI":"10.18653\/v1\/2021.emnlp-main.829"},{"key":"1595_CR54","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.insmatheco.2022.07.013","volume":"107","author":"S Xu","year":"2022","unstructured":"Xu S, Zhang C, Hong D (2022) BERT-based NLP techniques for classification and severity modeling in basic warranty data study. Insur Math Econ 107:57\u201367","journal-title":"Insur Math Econ"},{"key":"1595_CR55","doi-asserted-by":"crossref","unstructured":"Choi H, Kim J, Joe S, Gwon Y (2021) Evaluation of BERT and ALBERT sentence embedding performance on downstream NLP tasks. In: 2020 25th international conference on pattern recognition (ICPR), (IEEE 2021), pp 5482\u20135487","DOI":"10.1109\/ICPR48806.2021.9412102"},{"key":"1595_CR56","unstructured":"Hendrycks D, Gimpel K (2016) Gaussian error linear units (GELUS). arXiv preprint http:\/\/arxiv.org\/abs\/1606.08415"},{"key":"1595_CR57","unstructured":"Sanh V, Debut L, Chaumond J, Wolf T (2019) DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. 
arXiv preprint http:\/\/arxiv.org\/abs\/1910.01108"},{"key":"1595_CR58","unstructured":"Shewchuk JR (1994) An introduction to the conjugate gradient method without the agonizing pain"},{"key":"1595_CR59","unstructured":"Molchanov P, Tyree S, Karras T, Aila T, Kautz J (2016) Pruning convolutional neural networks for resource efficient inference. arXiv preprint http:\/\/arxiv.org\/abs\/1611.06440"},{"key":"1595_CR60","unstructured":"Liu L, Zhang S, Kuang Z, Zhou A, Xue J-H, Wang X, Chen Y, Yang W, Liao Q, Zhang W (2021) Group fisher pruning for practical network compression. In: International conference on machine learning, (PMLR 2021), pp 7021\u20137032"}],"container-title":["Complex & Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01595-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01595-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01595-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,16]],"date-time":"2024-10-16T22:13:21Z","timestamp":1729116801000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01595-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,7]]},"references-count":60,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["1595"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01595-w","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"type":"print","value":"2199-4536"},{"type":"electronic","value":"2198-6053"}],"subject":[],"published":{"date-parts":[[2024,8,7]]},"assertion":[{"value":"20 March 
2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 July 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 August 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We know of no conflicts of interest associated with this publication, and there has been no significant financial support for this work that could have influenced its outcome. On behalf of all authors, the corresponding author states that there is no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}