{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T16:48:45Z","timestamp":1774716525443,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372348"],"award-info":[{"award-number":["62372348"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shaanxi Outstanding Youth Science Fund"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Geosci. Remote Sensing"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/tgrs.2024.3371681","type":"journal-article","created":{"date-parts":[[2024,2,29]],"date-time":"2024-02-29T18:55:22Z","timestamp":1709232922000},"page":"1-10","source":"Crossref","is-referenced-by-count":11,"title":["Two-Way Assistant: A Knowledge Distillation Object Detection Method for Remote Sensing Images"],"prefix":"10.1109","volume":"62","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5791-3674","authenticated-orcid":false,"given":"Xi","family":"Yang","sequence":"first","affiliation":[{"name":"State Key Laboratory of Integrated Services Networks, School of Telecommunications Engineering, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1782-2541","authenticated-orcid":false,"given":"Sheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Hangzhou Institute of Technology, Xidian University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-6079-970X","authenticated-orcid":false,"given":"Weichao","family":"Yang","sequence":"additional","affiliation":[{"name":"Xi&#x2019;an Institute of Space Radio Technology, Xi&#x2019;an, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1503.02531"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01575-y"},{"key":"ref3","first-page":"1135","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Han"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3186155"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00495"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/161"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5963"},{"key":"ref8","article-title":"Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer","author":"Zagoruyko","year":"2016","journal-title":"arXiv:1612.03928"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3083113"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013779"},{"key":"ref11","first-page":"2760","article-title":"Paraphrasing complex network: Network compression via factor transfer","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Kim"},{"key":"ref12","first-page":"5213","article-title":"Distilling object detectors with feature richness","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Zhixing"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00507"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3174276"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00460"},{"key":"ref16","first-page":"1","article-title":"Improve object detection with feature-based knowledge distillation: Towards accurate and efficient detectors","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.690"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.81"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.169"},{"key":"ref24","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref25","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017","journal-title":"arXiv:1704.04861"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2020.3023928"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2021.3089170"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2016.2601622"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2017.2783902"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3238801"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00914"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00145"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.120519"},{"key":"ref35","first-page":"742","article-title":"Learning efficient object detection models with knowledge distillation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Chen"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.776"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3175213"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3192013"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01079"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/2206917"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2773199"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/BIGSARDATA.2017.8124934"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20083-0_4"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1906.07155"},{"key":"ref45","first-page":"21002","article-title":"Generalized focal loss: Learning qualified and distributed bounding boxes for dense object detection","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Li"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2016.2577031"}],"container-title":["IEEE Transactions on Geoscience and Remote Sensing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/36\/10354519\/10453608.pdf?arnumber=10453608","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,14]],"date-time":"2024-03-14T17:55:37Z","timestamp":1710438937000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10453608\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/tgrs.2024.3371681","relation":{},"ISSN":["0196-2892","1558-0644"],"issn-type":[{"value":"0196-2892","type":"print"},{"value":"1558-0644","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}