{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:13:05Z","timestamp":1763190785105,"version":"3.45.0"},"reference-count":53,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227233","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["OWLed: Outlier-weighed Layerwise Pruning for Efficient Autonomous Driving Framework"],"prefix":"10.1109","author":[{"given":"Jiaxi","family":"Li","sequence":"first","affiliation":[{"name":"University of Surrey,Computer Science Research Centre,Guildford,United Kingdom"}]},{"given":"Lu","family":"Yin","sequence":"additional","affiliation":[{"name":"University of Surrey,Computer Science Research Centre,Guildford,United Kingdom"}]},{"given":"Xilu","family":"Wang","sequence":"additional","affiliation":[{"name":"University of Surrey,Computer Science Research Centre,Guildford,United Kingdom"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2023.3274536"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3435937"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58592-1_37"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_1"},{"key":"ref5","first-page":"6531","article-title":"Motion transformer with global intention localization and local movement refinement","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"35","author":"Shi"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01447"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01712"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01319"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611018"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/WACVW60836.2024.00106"},{"key":"ref11","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"33","author":"Brown"},{"article-title":"Sparks of artificial general intelligence: Early experiments with gpt-4","year":"2023","author":"Bubeck","key":"ref12"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref13"},{"article-title":"Languagempc: Large language models as decision makers for autonomous driving","year":"2023","author":"Sha","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3440097"},{"article-title":"Gpt-driver: Learning to drive with gpt","volume-title":"NeurIPS workshop Found. Models Decis. Mak","author":"Mao","key":"ref16"},{"article-title":"On the road with gpt-4v (ision): Explorations of utilizing visual-language model as autonomous driving agent","volume-title":"ICLR Workshop Large Language Model (LLM) Agents","author":"Wen","key":"ref17"},{"article-title":"Dilu: A knowledge-driven approach to autonomous driving with large language models","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Wen","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73347-5_17"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72995-9_23"},{"article-title":"Drivevlm: The convergence of autonomous driving and large vision-language models","year":"2024","author":"Tian","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01432"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/fllm63129.2024.10852498"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01297"},{"key":"ref25","article-title":"Memory-efficient fine-tuning of compressed large language models via sub-4-bit integer quantization","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"36","author":"Kim"},{"article-title":"Minillm: Knowledge distillation of large language models","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Gu","key":"ref26"},{"article-title":"Rethinking the value of network pruning","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Liu","key":"ref27"},{"key":"ref28","article-title":"Skeletonization: A technique for trimming the fat from a network via relevance assessment","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"1","author":"Mozer"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevA.39.6600"},{"key":"ref30","first-page":"605","article-title":"Optimal brain damage, advances in neural information processing systems","volume-title":"Denver 1989","volume":"598","author":"Le Cun","year":"1990"},{"key":"ref31","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"28","author":"Han"},{"article-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Han","key":"ref32"},{"article-title":"The state of sparsity in deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Gale","key":"ref33"},{"article-title":"Comparing rewinding and fine-tuning in neural network pruning","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Renda","key":"ref34"},{"key":"ref35","first-page":"129","article-title":"What is the state of neural network pruning?","volume-title":"Proc. Mach. Learn. syst","volume":"2","author":"Blalock"},{"key":"ref36","first-page":"10323","article-title":"Sparsegpt: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Frantar"},{"article-title":"A simple and effective pruning approach for large language models","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Sun","key":"ref37"},{"key":"ref38","first-page":"57101","article-title":"Outlier weighed layerwise sparsity (OWL): A missing secret sauce for pruning LLMs to high sparsity","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","volume":"235","author":"Yin"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/DAC18074.2021.9586163"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111522"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-16-1103-2_22"},{"key":"ref42","first-page":"21702","article-title":"Llm-pruner: On the structural pruning of large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"36","author":"Ma"},{"key":"ref43","first-page":"2943","article-title":"Rigging the lottery: Making all tickets winners","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Evci"},{"key":"ref44","first-page":"6989","article-title":"Do we actually need dense over-parameterization? in-time over-parameterization in sparse training","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Liu"},{"article-title":"Dynamic sparse no training: Training-free fine-tuning for sparse LLMs","volume-title":"Proc. Int. Conf. Learn. Rep. (ICLR)","author":"Zhang","key":"ref45"},{"article-title":"Junk dna hypothesis: Pruning small pre-trained weights Irreversibly and Monotonically impairs \u201ddifficult\" downstream tasks in llms","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Yin","key":"ref46"},{"issue":"140","key":"ref47","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res. (JMLR)"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.365"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.newsum-1.1"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.153"},{"key":"ref51","article-title":"The emergence of essential sparsity in large pre-trained models: The weights that matter","volume-title":"Proc. Adv. Neur. Inf. Process. Sys. (NeurIPS)","volume":"36","author":"Jaiswal"},{"article-title":"How can we be so dense? the benefits of using highly sparse representations","year":"2019","author":"Ahmad","key":"ref52"},{"article-title":"Can subnetwork structure be the key to out-of-distribution generalization?","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Zhang","key":"ref53"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227233.pdf?arnumber=11227233","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:09:21Z","timestamp":1763190561000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227233\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":53,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227233","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}