{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:13:58Z","timestamp":1763190838977,"version":"3.45.0"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100007787","name":"National Research Centre","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100007787","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228856","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-10","source":"Crossref","is-referenced-by-count":0,"title":["Localizing and Exploiting Concept Areas in LLMs for Downstream Classification Tasks"],"prefix":"10.1109","author":[{"given":"Alfredo","family":"Nascita","sequence":"first","affiliation":[{"name":"University of Napoli Federico II,Italy"}]},{"given":"Jonatan","family":"Krolikowski","sequence":"additional","affiliation":[{"name":"Huawei Technologies Co. Ltd,France"}]},{"given":"Valerio","family":"Persico","sequence":"additional","affiliation":[{"name":"University of Napoli Federico II,Italy"}]},{"given":"Antonio","family":"Pescap\u00e9","sequence":"additional","affiliation":[{"name":"University of Napoli Federico II,Italy"}]},{"given":"Dario","family":"Rossi","sequence":"additional","affiliation":[{"name":"Huawei Technologies Co. Ltd,France"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"NeurIPS"},{"article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","volume-title":"NAACL-HLT","author":"Devlin","key":"ref2"},{"key":"ref3","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","author":"Fedus","year":"2022","journal-title":"Journal of Machine Learning Research"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287574"},{"year":"2021","key":"ref5","article-title":"EU AI Act"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445922"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.148"},{"journal-title":"Towards a rigorous science of interpretable machine learning","year":"2017","author":"Doshi-Velez","key":"ref8"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.765"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.581"},{"key":"ref11","article-title":"Locating and editing factual associations in GPT","author":"Meng","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.446"},{"article-title":"BERTnesia: Investigating the capture and forgetting of knowledge in BERT","volume-title":"Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP","author":"Wallat","key":"ref13"},{"article-title":"Mass editing memory in a transformer","volume-title":"ICLR","author":"Meng","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.174"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.nlposs-1.19"},{"journal-title":"Can large language models explain themselves? A study of LLM-generated self-explanations","year":"2023","author":"Huang","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1002"},{"article-title":"Attention is not Explanation","volume-title":"Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)","author":"Jain","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.538"},{"journal-title":"Benchmarking llms via uncertainty quantification","year":"2024","author":"Ye","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.751"},{"article-title":"Machine unlearning for random forests","volume-title":"ICML","author":"Brophy","key":"ref23"},{"key":"ref24","article-title":"Descent-to-delete: Gradient-based methods for machine unlearning","author":"Neel","year":"2021","journal-title":"Algorithmic Learning Theory"},{"article-title":"Axiomatic attribution for deep networks","volume-title":"ICML","author":"Sundararajan","key":"ref25"},{"journal-title":"Roberta: A robustly optimized bert pretraining approach","year":"2019","author":"Liu","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3696379"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228856.pdf?arnumber=11228856","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:12:30Z","timestamp":1763190750000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228856\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228856","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}