{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:30:21Z","timestamp":1766068221205,"version":"3.41.2"},"reference-count":59,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62201407","62271377"],"award-info":[{"award-number":["62201407","62271377"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Research and Development Program of Shannxi Program","award":["2024GX-ZDCYL-02-17","2024GX-ZDCYL-02-08","2023YBGY244","2023QCYLL28","2022ZDLGY01-12","2021ZDLGY01-06"],"award-info":[{"award-number":["2024GX-ZDCYL-02-17","2024GX-ZDCYL-02-08","2023YBGY244","2023QCYLL28","2022ZDLGY01-12","2021ZDLGY01-06"]}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2022M722496"],"award-info":[{"award-number":["2022M722496"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Key Research and Development Program of China","award":["2021ZD0110400","2021ZD0110404"],"award-info":[{"award-number":["2021ZD0110400","2021ZD0110404"]}]},{"name":"Key Scientific Technological Innovation Research Project"},{"name":"Ministry of Education"},{"name":"State Key Program and the Foundation for Innovative Research Groups of the National Natural Science Foundation of China","award":["61836009"],"award-info":[{"award-number":["61836009"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22B2054"],"award-info":[{"award-number":["U22B2054"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Fund for Foreign Scholars in University Research and Teaching Program's 111 Project","award":["B07048"],"award-info":[{"award-number":["B07048"]}]},{"name":"Program for Cheung Kong Scholars and Innovative Research Team in University","award":["IRT 15R53"],"award-info":[{"award-number":["IRT 15R53"]}]},{"name":"Key Research and Development Program of Xi'an","award":["23ZDCYJSGG0001-2022"],"award-info":[{"award-number":["23ZDCYJSGG0001-2022"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Geosci. Remote Sensing"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tgrs.2025.3584094","type":"journal-article","created":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T13:38:24Z","timestamp":1751290704000},"page":"1-13","source":"Crossref","is-referenced-by-count":1,"title":["Burden-Free Distillation From Foundation Model for Efficient Remote Sensing Change Detection"],"prefix":"10.1109","volume":"63","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4940-1211","authenticated-orcid":false,"given":"Shuang","family":"Wang","sequence":"first","affiliation":[{"name":"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi&#x2019;an, China"}]},{"given":"Chonghua","family":"Lv","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6943-4657","authenticated-orcid":false,"given":"Dou","family":"Quan","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi&#x2019;an, China"}]},{"given":"Ning","family":"Huyan","sequence":"additional","affiliation":[{"name":"Department of Automation, Tsinghua University, Beijing, China"}]},{"given":"Xianwei","family":"Cao","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi&#x2019;an, China"}]},{"given":"Jingxi","family":"Sun","sequence":"additional","affiliation":[{"name":"Sichuan Aerospace System Engineering Institute, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3354-9617","authenticated-orcid":false,"given":"Licheng","family":"Jiao","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Perception and Image Understanding of Ministry of Education of China, Xidian University, Xi&#x2019;an, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/rs12101662"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2018.2858817"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00153"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.3390\/rs12101688"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1080\/01431168908903939"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2004.838698"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.isprsjprs.2021.05.002"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1080\/01431168908903937"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/0034-4257(94)90144-9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/0034-4257(84)90025-7"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/36.843009"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LGRS.2020.2988032"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3174276"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.isprsjprs.2020.06.003"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3159544"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3344083"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3165851"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IGARSS46834.2022.9883686"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3160007"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2024.3365825"},{"key":"ref21","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref22","article-title":"On the opportunities and risks of foundation models","author":"Bommasani","year":"2021","journal-title":"arXiv:2108.07258"},{"key":"ref23","first-page":"2790","article-title":"Parameter-efficient transfer learning for NLP","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Houlsby"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01926"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2018.8451652"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/LGRS.2021.3056416"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2021.3106697"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2021.3095166"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3296383"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3281711"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2024.3390838"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2024.3368168"},{"key":"ref35","article-title":"Fast segment anything","author":"Zhao","year":"2023","journal-title":"arXiv:2306.12156"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1503.02531"},{"key":"ref37","article-title":"FitNets: Hints for thin deep nets","author":"Romero","year":"2014","journal-title":"arXiv:1412.6550"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16865"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00497"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00526"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00271"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58571-6_21"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.3390\/rs11111382"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00276"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00288"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.5555\/3524938.3525087"},{"key":"ref47","article-title":"Augmentation-free dense contrastive knowledge distillation for efficient semantic segmentation","author":"Fan","year":"2023","journal-title":"arXiv:2312.04168"},{"key":"ref48","first-page":"12077","article-title":"SegFormer: Simple and efficient design for semantic segmentation with transformers","volume-title":"Proc. Neural Inf. Process. Syst. (NeurIPS)","author":"Xie"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2023.3310208"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.3390\/rs13245094"},{"key":"ref51","article-title":"Revisiting consistency regularization for semi-supervised change detection in remote sensing images","author":"Bandara","year":"2022","journal-title":"arXiv:2204.08454"},{"key":"ref52","article-title":"Open-CD: A comprehensive toolbox for change detection","author":"Li","year":"2024","journal-title":"arXiv:2407.15317"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00262"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2960224"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2020.3011913"},{"key":"ref56","article-title":"How to train your ViT? Data, augmentation, and regularization in vision transformers","author":"Steiner","year":"2021","journal-title":"arXiv:2106.10270"},{"key":"ref57","article-title":"RS5M and GeoRSCLIP: A large scale vision-language dataset and a large vision-language model for remote sensing","author":"Zhang","year":"2023","journal-title":"arXiv:2306.11300"},{"key":"ref58","article-title":"ImageNet-21K pretraining for the masses","author":"Ridnik","year":"2021","journal-title":"arXiv:2104.10972"},{"key":"ref59","article-title":"LAION-400M: Open dataset of CLIP-filtered 400 million image-text pairs","author":"Schuhmann","year":"2021","journal-title":"arXiv:2111.02114"}],"container-title":["IEEE Transactions on Geoscience and Remote Sensing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/36\/10807682\/11059258.pdf?arnumber=11059258","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,21]],"date-time":"2025-07-21T18:08:35Z","timestamp":1753121315000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11059258\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":59,"URL":"https:\/\/doi.org\/10.1109\/tgrs.2025.3584094","relation":{},"ISSN":["0196-2892","1558-0644"],"issn-type":[{"type":"print","value":"0196-2892"},{"type":"electronic","value":"1558-0644"}],"subject":[],"published":{"date-parts":[[2025]]}}}