{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T17:59:33Z","timestamp":1775325573217,"version":"3.50.1"},"reference-count":77,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSFC Program","award":["62206052"],"award-info":[{"award-number":["62206052"]}]},{"name":"NSFC Program","award":["62125602"],"award-info":[{"award-number":["62125602"]}]},{"name":"NSFC Program","award":["62076063"],"award-info":[{"award-number":["62076063"]}]},{"name":"Jiangsu Natural Science Foundation Project","award":["BK20210224"],"award-info":[{"award-number":["BK20210224"]}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2024M750424"],"award-info":[{"award-number":["2024M750424"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Postdoctoral Fellowship Program of CPSF","award":["GZC20240252"],"award-info":[{"award-number":["GZC20240252"]}]},{"name":"Jiangsu Funding Program for Excellent Postdoctoral Talent","award":["2024ZB242"],"award-info":[{"award-number":["2024ZB242"]}]},{"name":"Xplorer Prize"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2024.3521671","type":"journal-article","created":{"date-parts":[[2025,1,6]],"date-time":"2025-01-06T19:39:44Z","timestamp":1736192384000},"page":"120-132","source":"Crossref","is-referenced-by-count":6,"title":["DPStyler: Dynamic PromptStyler for Source-Free Domain Generalization"],"prefix":"10.1109","volume":"27","author":[{"given":"Yunlong","family":"Tang","sequence":"first","affiliation":[{"name":"Key Laboratory of New Generation Artificial Intelligence Technology and Its Interdisciplinary Applications, Ministry of Education, Southeast University, Nanjing, China"}]},{"given":"Yuxuan","family":"Wan","sequence":"additional","affiliation":[{"name":"Key Laboratory of New Generation Artificial Intelligence Technology and Its Interdisciplinary Applications, Ministry of Education, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7091-0702","authenticated-orcid":false,"given":"Lei","family":"Qi","sequence":"additional","affiliation":[{"name":"Key Laboratory of New Generation Artificial Intelligence Technology and Its Interdisciplinary Applications, Ministry of Education, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7729-0622","authenticated-orcid":false,"given":"Xin","family":"Geng","sequence":"additional","affiliation":[{"name":"Key Laboratory of New Generation Artificial Intelligence Technology and Its Interdisciplinary Applications, Ministry of Education, Southeast University, Nanjing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00949"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00897"},{"key":"ref3","first-page":"1","article-title":"A meta-transfer objective for learning to disentangle causal mechanisms","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bengio","year":"2019"},{"key":"ref4","first-page":"5389","article-title":"Do imagenet classifiers generalize to imagenet?","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Recht","year":"2019"},{"key":"ref5","first-page":"1","article-title":"Benchmarking neural network robustness to common corruptions and perturbationss","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hendrycks","year":"2018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3063616"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/7503.003.0022"},{"key":"ref8","first-page":"1994","article-title":"CyCADA: Cycle-consistent adversarial domain adaptation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hoffman","year":"2018"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01834"},{"key":"ref10","first-page":"1","article-title":"Surgical fine-tuning improves adaptation to distribution shifts","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lee","year":"2022"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00814"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10306"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.316"},{"key":"ref14","first-page":"7523","article-title":"On learning invariant representations for domain adaptation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhao","year":"2019"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3146744"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3073258"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00682"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00219"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58542-6_5"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00699"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19821-2_9"},{"key":"ref22","first-page":"1","article-title":"Cycle-consistent masked autoencoder for unsupervised domain generalization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yang","year":"2022"},{"key":"ref23","first-page":"1","article-title":"ManyDG: Many-domain generalization for healthcare applications","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yang","year":"2022"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3263549"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3104379"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01439"},{"key":"ref27","first-page":"327","article-title":"Towards data-free domain generalization","volume-title":"Proc. Asian Conf. Mach. Learn.","author":"Frikha","year":"2023"},{"key":"ref28","article-title":"Domain-unified prompt representations for source-free domain generalization","author":"Niu","year":"2022"},{"key":"ref29","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2022.3178128"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00233"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20050-2_26"},{"key":"ref33","first-page":"1","article-title":"In search of lost domain generalization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gulrajani","year":"2020"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00948"},{"key":"ref35","first-page":"1","article-title":"Domain generalization with mixstyle","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhou","year":"2020"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7003"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00858"},{"key":"ref38","first-page":"5339","article-title":"Generalizing to unseen domains via adversarial data augmentation","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Volpi","year":"2018"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58517-4_33"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00566"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01026"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00696"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00153"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00811"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00029"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01257"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00087"},{"key":"ref48","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2022"},{"key":"ref49","first-page":"19730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2023"},{"key":"ref50","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jia","year":"2021"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01522"},{"key":"ref52","first-page":"1","article-title":"Using language to extend to unseen domains","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dunlap","year":"2022"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530164"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01753"},{"key":"ref56","first-page":"1","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gal","year":"2022"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-58347-1_10"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_8"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11596"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19836-6_3"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-8543-2_10"},{"key":"ref62","first-page":"28656","article-title":"Model ratatouille: Recycling diverse models for out-of-distribution generalization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ram","year":"2023"},{"key":"ref63","first-page":"1","article-title":"Optimal representations for covariate shift","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ruan","year":"2021"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00482"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00367"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2024.3377173"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109086"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.591"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.208"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.572"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00149"},{"key":"ref72","first-page":"1","article-title":"In search of lost domain generalization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gulrajani","year":"2021"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref75","first-page":"2579","article-title":"Viualizing data using t-SNE","volume":"9","author":"Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01539"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/10824918.pdf?arnumber=10824918","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,20]],"date-time":"2025-01-20T18:57:47Z","timestamp":1737399467000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10824918\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":77,"URL":"https:\/\/doi.org\/10.1109\/tmm.2024.3521671","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}