{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T14:13:45Z","timestamp":1772115225290,"version":"3.50.1"},"reference-count":162,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Technology Innovation Institute (TII) under the \"CASTLE: Cross-Layer Security for Machine Learning Systems IoT\" project"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3571995","type":"journal-article","created":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T17:16:15Z","timestamp":1747761375000},"page":"93190-93221","source":"Crossref","is-referenced-by-count":7,"title":["Survey on Backdoor Attacks on Deep Learning: Current Trends, Categorization, Applications, Research Challenges, and Future Prospects"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9841-6132","authenticated-orcid":false,"given":"Muhammad Abdullah","family":"Hanif","sequence":"first","affiliation":[{"name":"eBrain Laboratory, Division of Engineering, New York University (NYU) Abu Dhabi, Abu Dhabi, United Arab Emirates"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1611-9378","authenticated-orcid":false,"given":"Nandish","family":"Chattopadhyay","sequence":"additional","affiliation":[{"name":"eBrain Laboratory, Division of Engineering, New York University (NYU) Abu Dhabi, Abu Dhabi, United Arab Emirates"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6534-9295","authenticated-orcid":false,"given":"Bassem","family":"Ouni","sequence":"additional","affiliation":[{"name":"AI and Digital Science Research Center, Technology Innovation Institute (TII), Abu Dhabi, United Arab Emirates"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2607-8135","authenticated-orcid":false,"given":"Muhammad","family":"Shafique","sequence":"additional","affiliation":[{"name":"eBrain Laboratory, Division of Engineering, New York University (NYU) Abu Dhabi, Abu Dhabi, United Arab Emirates"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.dsp.2022.103514"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.11.118"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1002\/rob.21918"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-bioeng071516-044442"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1155\/2018\/7068349"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1201\/9781351251389-8"},{"key":"ref8","article-title":"Adversarial machine learning at scale","author":"Kurakin","year":"2016","journal-title":"arXiv:1611.01236"},{"key":"ref9","article-title":"BadNets: Identifying vulnerabilities in the machine learning model supply chain","author":"Gu","year":"2017","journal-title":"arXiv:1708.06733"},{"key":"ref10","article-title":"Backdoor attacks and countermeasures on deep learning: A comprehensive review","author":"Gao","year":"2020","journal-title":"arXiv:2007.10760"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ISQED48828.2020.9137011"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2020.07.133"},{"key":"ref13","article-title":"A survey of neural trojan attacks and defenses in deep learning","author":"Wang","year":"2022","journal-title":"arXiv:2202.07183"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1049\/cje.2021.00.126"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.017.2100714"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3551636"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/OJSP.2022.3190213"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3162397"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-98795-4_13"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3182979"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/OJCS.2023.3267221"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3701985"},{"key":"ref23","article-title":"A survey of recent backdoor attacks and defenses in large language models","author":"Zhao","year":"2024","journal-title":"arXiv:2406.06852"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2024.3361451"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3355816"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3382584"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.107166"},{"key":"ref28","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tan"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01175"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"ref31","article-title":"Natural backdoor datasets","author":"Wenger","year":"2022","journal-title":"arXiv:2206.10673"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TrustCom53373.2021.00093"},{"key":"ref33","article-title":"Augmentation backdoors","author":"Rance","year":"2022","journal-title":"arXiv:2209.15139"},{"key":"ref34","article-title":"Targeted backdoor attacks on deep learning systems using data poisoning","author":"Chen","year":"2017","journal-title":"arXiv:1712.05526"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8802997"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/4593002"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_11"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2022.3202687"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16201"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3374664.3375751"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/AICAS54282.2022.9869920"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2023.103523"},{"key":"ref43","article-title":"FRIB: Low-poisoning rate invisible backdoor attack based on feature repair","author":"Xia","year":"2022","journal-title":"arXiv:2207.12863"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3560830.3563730"},{"key":"ref45","article-title":"Backdoor attacks on vision transformers","author":"Subramanya","year":"2022","journal-title":"arXiv:2206.08477"},{"key":"ref46","article-title":"Enhancing clean label backdoor attack with two-phase specific triggers","author":"Luo","year":"2022","journal-title":"arXiv:2206.04881"},{"key":"ref47","article-title":"Circumventing backdoor defenses that are based on latent separability","author":"Qi","year":"2022","journal-title":"arXiv:2205.13613"},{"key":"ref48","article-title":"Narcissus: A practical clean-label backdoor attack with limited information","author":"Zeng","year":"2022","journal-title":"arXiv:2204.05255"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3579856.3582829"},{"key":"ref50","article-title":"Trojan horse training for breaking defenses against backdoor attacks in deep learning","author":"Rajabi","year":"2022","journal-title":"arXiv:2203.15506"},{"key":"ref51","article-title":"Under-confidence backdoors are resilient and stealthy backdoors","author":"Peng","year":"2022","journal-title":"arXiv:2202.11203"},{"key":"ref52","article-title":"Imperceptible and multi-channel backdoor attack against deep neural networks","author":"Xue","year":"2022","journal-title":"arXiv:2201.13164"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-023-04575-8"},{"key":"ref54","article-title":"Backdoor attack through frequency domain","author":"Wang","year":"2021","journal-title":"arXiv:2111.10991"},{"key":"ref55","article-title":"Check your other door! Creating backdoor attacks in the frequency domain","author":"Abed Al Kader Hammoud","year":"2021","journal-title":"arXiv:2109.05507"},{"key":"ref56","first-page":"19165","article-title":"Sleeper agent: Scalable hidden trigger backdoors for neural networks trained from scratch","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Souri"},{"key":"ref57","article-title":"Light can hack your face! Black-box backdoor attack on face recognition systems","author":"Li","year":"2020","journal-title":"arXiv:2009.06996"},{"key":"ref58","article-title":"FaceHack: Triggering backdoored facial recognition systems using facial characteristics","author":"Sarkar","year":"2020","journal-title":"arXiv:2006.11623"},{"key":"ref59","article-title":"HaS-nets: A heal and select mechanism to defend DNNs against backdoor attacks for data collection scenarios","author":"Ali","year":"2020","journal-title":"arXiv:2012.07474"},{"key":"ref60","article-title":"Label-consistent backdoor attacks","author":"Turner","year":"2019","journal-title":"arXiv:1912.02771"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1145\/3372297.3423362"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2022.102726"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01478"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01465"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00614"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01445"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP48549.2020.00019"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP53844.2022.00049"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746008"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747582"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01615"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506313"},{"key":"ref73","article-title":"WaNet-imperceptible warping-based backdoor attack","author":"Nguyen","year":"2021","journal-title":"arXiv:2102.10369"},{"key":"ref74","article-title":"Backdoor attack in the physical world","author":"Li","year":"2021","journal-title":"arXiv:2104.02361"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3110239"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2021.3087237"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2022.3206259"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2022.3161477"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3028448"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3201472"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58201-2_29"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/242"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM42981.2021.9488902"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1002\/int.22785"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1145\/3460319.3464809"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-021-11135-0"},{"key":"ref87","first-page":"13238","article-title":"Untargeted backdoor watermark: Towards harmless and stealthy dataset copyright protection","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref88","first-page":"18944","article-title":"Backdoor attack with imperceptible input and latent modification","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Doan"},{"key":"ref89","first-page":"18021","article-title":"Manipulating SGD with data ordering attacks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Shumailov"},{"key":"ref90","first-page":"3454","article-title":"Input-aware dynamic backdoor attack","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Nguyen"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2021.01.009"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1007\/s12083-020-01031-z"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/SPW50608.2020.00024"},{"key":"ref94","first-page":"1505","article-title":"Blind backdoors in deep learning models","volume-title":"Proc. 30th USENIX Secur. Symp.","author":"Bagdasaryan"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3021407"},{"key":"ref96","article-title":"Don\u2019t trigger me! A triggerless backdoor attack against deep neural networks","author":"Salem","year":"2020","journal-title":"arXiv:2010.03282"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6871"},{"key":"ref98","article-title":"Clean-label backdoor attacks","author":"Turner","year":"2018"},{"key":"ref99","first-page":"1","article-title":"Poison frogs! Targeted clean-label poisoning attacks on neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Shafahi"},{"key":"ref100","first-page":"7614","article-title":"Transferable clean-label poisoning attacks on deep neural nets","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhu"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00786"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i18.29957"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02355"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72359-9_14"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.3233\/FAIA240496"},{"key":"ref106","first-page":"71029","article-title":"Label poisoning is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Jha"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096034"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00392"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3296408"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00406"},{"key":"ref111","article-title":"Mimicus: A Python library for adversarial classifier evasion","author":"Srndic","year":"2014"},{"key":"ref112","article-title":"Don\u2019t knock! Rowhammer at the backdoor of DNN models","author":"Caner Tol","year":"2021","journal-title":"arXiv:2110.07683"},{"key":"ref113","article-title":"On hiding neural networks inside neural networks","author":"Guo","year":"2020","journal-title":"arXiv:2002.10078"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.1109\/IJCB48548.2020.9304875"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.1145\/3340531.3412130"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01321"},{"key":"ref117","article-title":"DBIA: Data-free backdoor injection attack against transformer networks","author":"Lv","year":"2021","journal-title":"arXiv:2111.11870"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20065-6_7"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00762"},{"key":"ref120","article-title":"How to inject backdoors with better consistency: Logit anchoring on clean data","author":"Zhang","year":"2021","journal-title":"arXiv:2109.01300"},{"key":"ref121","first-page":"8068","article-title":"Handcrafted backdoors in deep neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Hong"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.1145\/3531536.3532966"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094691"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02356"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.1145\/3548606.3560678"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01299"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE43902.2021.00035"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2022.3164073"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2022.3166671"},{"key":"ref130","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403064"},{"key":"ref131","article-title":"ImpNet: Imperceptible and blackbox-undetectable backdoors in compiled neural networks","author":"Clifford","year":"2022","journal-title":"arXiv:2210.00108"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1109\/SP40001.2021.00034"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00038"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243757"},{"key":"ref135","first-page":"1541","article-title":"Demon in the variant: Statistical analysis of $DNNs$ for robust backdoor contamination detection","volume-title":"Proc. 30th USENIX Secur. Symp.","author":"Tang"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"ref137","first-page":"3301","article-title":"Shallow-deep networks: Understanding and mitigating network overthinking","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kaya"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2016.7900006"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00244"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-25056-9_26"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095980"},{"key":"ref142","article-title":"Attacking by aligning: Clean-label backdoor attacks on object detection","author":"Cheng","year":"2023","journal-title":"arXiv:2307.10487"},{"key":"ref143","first-page":"1","article-title":"Clean-image backdoor: Attacking multi-label models with poisoned labels only","volume-title":"Proc. The 11th Int. Conf. Learn. Represent.","author":"Chen"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.1145\/3359789.3359790"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2018.23291"},{"key":"ref146","article-title":"SCALE-UP: An efficient black-box input-level backdoor detection via analyzing scaled prediction consistency","author":"Guo","year":"2023","journal-title":"arXiv:2302.03251"},{"key":"ref147","first-page":"1685","article-title":"Towards a proactive $ML$ approach for detecting backdoor poison samples","volume-title":"Proc. 32nd USENIX Secur. Symp.","author":"Qi"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.1109\/ICCD.2017.16"},{"key":"ref149","article-title":"Detecting backdoor attacks on deep neural networks by activation clustering","author":"Chen","year":"2018","journal-title":"arXiv:1811.03728"},{"key":"ref150","first-page":"1","article-title":"Spectral signatures in backdoor attacks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Tran"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.5040\/9781350250307-0201"},{"key":"ref152","article-title":"AEVA: Black-box backdoor detection using adversarial extreme value analysis","author":"Guo","year":"2021","journal-title":"arXiv:2110.14880"},{"key":"ref153","article-title":"TABOR: A highly accurate approach to inspecting and restoring trojan backdoors in AI systems","author":"Guo","year":"2019","journal-title":"arXiv:1908.01763"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3363216"},{"key":"ref155","article-title":"Neural attention distillation: Erasing backdoor triggers from deep neural networks","author":"Li","year":"2021","journal-title":"arXiv:2101.05930"},{"key":"ref156","first-page":"14900","article-title":"Anti-backdoor learning: Training clean models on poisoned data","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref157","article-title":"Backdoor defense via decoupling the training process","author":"Huang","year":"2022","journal-title":"arXiv:2202.03423"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00470-5_13"},{"key":"ref159","first-page":"16913","article-title":"Adversarial neuron pruning purifies backdoored deep models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wu"},{"key":"ref160","article-title":"Baseline pruning-based approach to trojan detection in neural networks","author":"Bajcsy","year":"2021","journal-title":"arXiv:2101.12016"},{"key":"ref161","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00068"},{"key":"ref162","first-page":"9389","article-title":"Just how toxic is data poisoning? A unified benchmark for backdoor and data poisoning attacks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schwarzschild"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/11007533.pdf?arnumber=11007533","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,3]],"date-time":"2025-06-03T05:53:36Z","timestamp":1748930016000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11007533\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":162,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3571995","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}