{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T18:56:55Z","timestamp":1773773815364,"version":"3.50.1"},"reference-count":69,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T00:00:00Z","timestamp":1717200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["Z200002"],"award-info":[{"award-number":["Z200002"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U19B2036"],"award-info":[{"award-number":["U19B2036"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62225601"],"award-info":[{"award-number":["62225601"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Scholarships from the China Scholarship Council","award":["CSC 202006470036"],"award-info":[{"award-number":["CSC 202006470036"]}]},{"name":"Beijing University of Posts and Telecommunications (BUPT) Excellent Ph.D. Students Foundation","award":["CX2020105"],"award-info":[{"award-number":["CX2020105"]}]},{"name":"Program for Youth Innovative Research Team of BUPT","award":["2023QNTD02"],"award-info":[{"award-number":["2023QNTD02"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1109\/tcsvt.2023.3326862","type":"journal-article","created":{"date-parts":[[2023,10,23]],"date-time":"2023-10-23T18:30:20Z","timestamp":1698085820000},"page":"4159-4174","source":"Crossref","is-referenced-by-count":13,"title":["Mind the Gap: Open Set Domain Adaptation via Mutual-to-Separate Framework"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4081-3001","authenticated-orcid":false,"given":"Dongliang","family":"Chang","sequence":"first","affiliation":[{"name":"Pattern Recognition and Intelligent System Laboratory, School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7789-3060","authenticated-orcid":false,"given":"Aneeshan","family":"Sain","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2950-2488","authenticated-orcid":false,"given":"Zhanyu","family":"Ma","sequence":"additional","affiliation":[{"name":"Pattern Recognition and Intelligent System Laboratory, School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5908-3275","authenticated-orcid":false,"given":"Yi-Zhe","family":"Song","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing, University of Surrey, Guildford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1830-2595","authenticated-orcid":false,"given":"Ruiping","family":"Wang","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Information Processing, Institute of Computing Technology, Chinese Academy of Sciences (CAS), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9045-1339","authenticated-orcid":false,"given":"Jun","family":"Guo","sequence":"additional","affiliation":[{"name":"Pattern Recognition and Intelligent System Laboratory, School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126504"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00379"},{"issue":"1","key":"ref3","first-page":"3760","article-title":"Distributionmatching embedding for visual domain adaptation","volume":"17","author":"Baktashmotlagh","year":"2016","journal-title":"J. Mach. Learn. Res."},{"key":"ref4","article-title":"Cooperative learning with visual attributes","author":"Batra","year":"2017","journal-title":"arXiv:1705.05512"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-009-5152-4"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.173"},{"key":"ref7","first-page":"1","article-title":"Exploiting weakly-labeled web images to improve object classification: A domain adaptation approach","volume-title":"Proc. NeurIPS","author":"Bergamo"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58517-4_25"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.2968484"},{"key":"ref10","first-page":"1","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume-title":"Proc. ICLR","author":"Dosovitskiy"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3017213"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00808"},{"key":"ref13","article-title":"Unsupervised domain adaptation by backpropagation","author":"Ganin","year":"2014","journal-title":"arXiv:1409.7495"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6247911"},{"key":"ref15","first-page":"1","article-title":"Generative adversarial nets","volume-title":"Proc. NeurIPS","author":"Goodfellow"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/7503.003.0069"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1503.02531"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3351072"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10578-9_26"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i9.16977"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3056208"},{"key":"ref23","first-page":"1","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. NeurIPS","author":"Krizhevsky"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01239"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.591"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2019.2923639"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3249200"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00304"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3134673"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00924"},{"key":"ref32","first-page":"1","article-title":"Learning transferable features with deep adaptation networks","volume-title":"Proc. ICML","author":"Long"},{"key":"ref33","first-page":"1","article-title":"Conditional adversarial domain adaptation","volume-title":"Proc. NeurIPS","author":"Long"},{"key":"ref34","first-page":"1","article-title":"Unsupervised domain adaptation with residual transfer networks","volume-title":"Proc. NeurIPS","author":"Long"},{"key":"ref35","first-page":"1","article-title":"Progressive graph learning for open-set domain adaptation","volume-title":"Proc. ICML","author":"Luo"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3267765"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3275034"},{"issue":"11","key":"ref38","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3260246"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00473"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.2010.2091281"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00234"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.88"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00149"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00271"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-15561-1_16"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00887"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_10"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00139"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.89"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3073937"},{"key":"ref52","first-page":"1","article-title":"Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results","volume-title":"Proc. NeurIPS","author":"Tarvainen"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3192135"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.463"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.316"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.572"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3104835"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3223950"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01499"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/352"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2019.2963318"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3105614"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298826"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00283"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2842206"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00454"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3119965"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3265853"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3081729"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/76\/10550083\/10292696.pdf?arnumber=10292696","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,25]],"date-time":"2024-06-25T21:16:25Z","timestamp":1719350185000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10292696\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6]]},"references-count":69,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2023.3326862","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,6]]}}}