{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,16]],"date-time":"2026-03-16T20:10:23Z","timestamp":1773691823131,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176178"],"award-info":[{"award-number":["62176178"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62441235"],"award-info":[{"award-number":["62441235"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/tip.2026.3654473","type":"journal-article","created":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T21:12:53Z","timestamp":1769029973000},"page":"930-942","source":"Crossref","is-referenced-by-count":1,"title":["Interpretable Few-Shot Image Classification via Prototypical Concept-Guided Mixture of LoRA Experts"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2197-3739","authenticated-orcid":false,"given":"Zhong","family":"Ji","sequence":"first","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin Key Laboratory of Brain-Inspired Intelligence Technology, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-8117-5361","authenticated-orcid":false,"given":"Rongshuai","family":"Wei","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin Key Laboratory of Brain-Inspired Intelligence Technology, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-0163-4105","authenticated-orcid":false,"given":"Jingren","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin Key Laboratory of Brain-Inspired Intelligence Technology, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6670-3727","authenticated-orcid":false,"given":"Yanwei","family":"Pang","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin Key Laboratory of Brain-Inspired Intelligence Technology, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4361-956X","authenticated-orcid":false,"given":"Jungong","family":"Han","sequence":"additional","affiliation":[{"name":"Department of Automation, Tsinghua University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"8930","article-title":"This looks like that: Deep learning for interpretable image recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3459626"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3268001"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2965275"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3492733"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.012"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3297404"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3286259"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00093"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/s10444-023-10090-8"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00269"},{"key":"ref12","article-title":"ProtoS-ViT: Visual foundation models for sparse self-explainable classifications","author":"Turb\u00e9","year":"2024","journal-title":"arXiv:2406.10025"},{"key":"ref13","first-page":"1","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Dosovitskiy"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.5555\/3294996.3295163"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00653"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01435"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01891-x"},{"key":"ref20","article-title":"Tip-adapter: Training-free CLIP-adapter for better vision-language modeling","author":"Zhang","year":"2021","journal-title":"arXiv:2111.03930"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01049"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.79"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1994.6.2.181"},{"issue":"63","key":"ref24","first-page":"1829","article-title":"Nonlinear models using Dirichlet process mixtures","volume":"10","author":"Shahbaba","year":"2007","journal-title":"J. Mach. Learn. Res."},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1162\/089976602753633402"},{"key":"ref26","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2017","journal-title":"arXiv:1701.06538"},{"key":"ref27","article-title":"GShard: Scaling giant models with conditional computation and automatic sharding","author":"Lepikhin","year":"2020","journal-title":"arXiv:2006.16668"},{"issue":"120","key":"ref28","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.217"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.70"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref32","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2023","journal-title":"arXiv:2304.07193"},{"key":"ref33","first-page":"3","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representation","author":"Hu"},{"key":"ref34","article-title":"AdaLoRA: Adaptive budget allocation for parameter-efficient fine-tuning","author":"Zhang","year":"2023","journal-title":"arXiv:2303.10512"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acllong.353"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.252"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3468874"},{"key":"ref39","volume-title":"The Caltech-UCSD Birds-200-2011 Dataset","author":"Wah","year":"2011"},{"key":"ref40","article-title":"Fine-grained visual classification of aircraft","author":"Maji","year":"2013","journal-title":"arXiv:1306.5151"},{"key":"ref41","first-page":"3630","article-title":"Matching networks for one shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Vinyals"},{"key":"ref42","article-title":"Meta-learning with differentiable closed-form solvers","author":"Bertinetto","year":"2018","journal-title":"arXiv:1805.08136"},{"key":"ref43","article-title":"Few-shot learning with metric-agnostic conditional embeddings","author":"Hilliard","year":"2018","journal-title":"arXiv:1802.04376"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00192"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00197"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/11355710\/11360578.pdf?arnumber=11360578","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T21:27:16Z","timestamp":1769722036000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11360578\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/tip.2026.3654473","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}