{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T23:30:06Z","timestamp":1774049406991,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":41,"publisher":"ACM","funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176162"],"award-info":[{"award-number":["62176162"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Guangdong Basic and Applied Basic Research Foundation","award":["2023A1515012875"],"award-info":[{"award-number":["2023A1515012875"]}]},{"name":"Guangdong Basic and Applied Basic Research Foundation","award":["2022A1515140099"],"award-info":[{"award-number":["2022A1515140099"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3754547","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T07:38:54Z","timestamp":1761377934000},"page":"2625-2633","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Domain-aware Visual Context Prompt for Multi-Source Domain Adaptation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1215-4915","authenticated-orcid":false,"given":"Yuwu","family":"Lu","sequence":"first","affiliation":[{"name":"South China Normal University, Foshan, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-5648-4237","authenticated-orcid":false,"given":"Haoyu","family":"Huang","sequence":"additional","affiliation":[{"name":"South China Normal University, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-1027-6065","authenticated-orcid":false,"given":"Xue","family":"Hu","sequence":"additional","affiliation":[{"name":"South China Normal University, Guangzhou, 
China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i2.27830"},{"key":"e_1_3_2_1_2_1","first-page":"192","article-title":"Imageclef 2014","author":"Caputo Barbara","year":"2014","unstructured":"Barbara Caputo, Henning M\u00fcller, Jesus Martinez-Gomez, Mauricio Villegas, Burak Acar, Novi Patricia, Neda Marvasti, Suzan \u00dcsk\u00fcdarli, Roberto Paredes, and Miguel Cazorla. 2014. Imageclef 2014: Overview and Analysis of the Results. In ICCLEF. 192-211.","journal-title":"Overview and Analysis of the Results. In ICCLEF."},{"key":"e_1_3_2_1_3_1","first-page":"74127","article-title":"Multi-prompt alignment for multi-source unsupervised domain adaptation","author":"Chen Haoran","year":"2023","unstructured":"Haoran Chen, Xintong Han, Zuxuan Wu, and Yu-Gang Jiang. 2023. Multi-prompt alignment for multi-source unsupervised domain adaptation. In NeurIPS. 74127-74139.","journal-title":"NeurIPS."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3186531"},{"key":"e_1_3_2_1_5_1","volume-title":"An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929","author":"Dosovitskiy Alexey","year":"2020","unstructured":"Alexey Dosovitskiy. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_6_1","first-page":"23375","article-title":"Domain-agnostic mutual prompting for unsupervised domain adaptation","author":"Du Zhekai","year":"2024","unstructured":"Zhekai Du, Xinyao Li, Fengling Li, Ke Lu, Lei Zhu, and Jingjing Li. 2024. Domain-agnostic mutual prompting for unsupervised domain adaptation. In CVPR. 
23375-23384.","journal-title":"CVPR."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3327962"},{"key":"e_1_3_2_1_8_1","volume-title":"Agent attention: On the integration of softmax and linear attention. arXiv preprint arXiv:2312.08874","author":"Han Dongchen","year":"2023","unstructured":"Dongchen Han, Tianzhu Ye, Yizeng Han, Zhuofan Xia, Siyuan Pan, Pengfei Wan, Shiji Song, and Gao Huang. 2023. Agent attention: On the integration of softmax and linear attention. arXiv preprint arXiv:2312.08874 (2023)."},{"key":"e_1_3_2_1_9_1","first-page":"770","article-title":"Deep Residual Learning for Image Recognition","author":"He Kaiming","year":"2016","unstructured":"Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In CVPR. 770-778.","journal-title":"CVPR."},{"key":"e_1_3_2_1_10_1","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","author":"Jia Chao","year":"2021","unstructured":"Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML. 4904-4916.","journal-title":"ICML."},{"key":"e_1_3_2_1_11_1","first-page":"766","volume-title":"IEEE TPAMI","volume":"43","author":"Kouw Wouter M","year":"2019","unstructured":"Wouter M Kouw and Marco Loog. 2019. A review of domain adaptation without target labels. IEEE TPAMI, Vol. 43, 3 (2019), 766-785."},{"key":"e_1_3_2_1_12_1","first-page":"16155","article-title":"Padclip: Pseudo-labeling with adaptive debiasing in clip for unsupervised domain adaptation","author":"Lai Zhengfeng","year":"2023","unstructured":"Zhengfeng Lai, Noranart Vesdapunt, Ning Zhou, Jun Wu, Cong Phuoc Huynh, Xuelu Li, Kah Kuen Fu, and Chen-Nee Chuah. 2023. 
Padclip: Pseudo-labeling with adaptive debiasing in clip for unsupervised domain adaptation. In ICCV. 16155-16165.","journal-title":"ICCV."},{"key":"e_1_3_2_1_13_1","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"Li Junnan","year":"2022","unstructured":"Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML. 12888-12900.","journal-title":"ICML."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3307789"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3370978"},{"key":"e_1_3_2_1_16_1","first-page":"4727","article-title":"Dynamic classifier alignment for unsupervised multi-source domain adaptation","volume":"35","author":"Li Keqiuyin","year":"2023","unstructured":"Keqiuyin Li, Jie Lu, Hua Zuo, and Guangquan Zhang. 2023a. Dynamic classifier alignment for unsupervised multi-source domain adaptation. IEEE TKDE, Vol. 35, 5 (2023), 4727-4740.","journal-title":"IEEE TKDE"},{"key":"e_1_3_2_1_17_1","first-page":"4100","article-title":"Progressive spatio-temporal prototype matching for text-video retrieval","author":"Li Pandeng","year":"2023","unstructured":"Pandeng Li, Chen-Wei Xie, Liming Zhao, Hongtao Xie, Jiannan Ge, Yun Zheng, Deli Zhao, and Yongdong Zhang. 2023b. Progressive spatio-temporal prototype matching for text-video retrieval. In CVPR. 4100-4110.","journal-title":"CVPR."},{"key":"e_1_3_2_1_18_1","first-page":"10998","article-title":"Dynamic transfer for multi-source domain adaptation","author":"Li Yunsheng","year":"2021","unstructured":"Yunsheng Li, Lu Yuan, Yinpeng Chen, Pei Wang, and Nuno Vasconcelos. 2021a. Dynamic transfer for multi-source domain adaptation. In CVPR. 
10998-11007.","journal-title":"CVPR."},{"key":"e_1_3_2_1_19_1","first-page":"10998","article-title":"Dynamic transfer for multi-source domain adaptation","author":"Li Yunsheng","year":"2021","unstructured":"Yunsheng Li, Lu Yuan, Yinpeng Chen, Pei Wang, and Nuno Vasconcelos. 2021b. Dynamic transfer for multi-source domain adaptation. In CVPR. 10998-11007.","journal-title":"CVPR."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2868685"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3358062"},{"key":"e_1_3_2_1_22_1","first-page":"1406","article-title":"Moment matching for multi-source domain adaptation","author":"Peng Xingchao","year":"2019","unstructured":"Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. 2019. Moment matching for multi-source domain adaptation. In ICCV. 1406-1415.","journal-title":"ICCV."},{"key":"e_1_3_2_1_23_1","volume-title":"VCP-CLIP: A visual context prompting model for zero-shot anomaly segmentation. arXiv preprint arXiv:2407.12276","author":"Qu Zhen","year":"2024","unstructured":"Zhen Qu, Xian Tao, Mukesh Prasad, Fei Shen, Zhengtao Zhang, Xinyi Gong, and Guiguang Ding. 2024. VCP-CLIP: A visual context prompting model for zero-shot anomaly segmentation. arXiv preprint arXiv:2407.12276 (2024)."},{"key":"e_1_3_2_1_24_1","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al., 2021. Learning transferable visual models from natural language supervision. In ICML. 8748-8763.","journal-title":"ICML."},{"key":"e_1_3_2_1_25_1","volume-title":"Adapting visual category models to new domains","author":"Saenko Kate","unstructured":"Kate Saenko, Brian Kulis, Mario Fritz, and Trevor Darrell. 2010. 
Adapting visual category models to new domains. In ECCV. Springer, 213-226."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2014.12.003"},{"key":"e_1_3_2_1_27_1","first-page":"7191","article-title":"Safe self-refinement for transformer-based domain adaptation","author":"Sun Tao","year":"2022","unstructured":"Tao Sun, Cheng Lu, Tianshuo Zhang, and Haibin Ling. 2022. Safe self-refinement for transformer-based domain adaptation. In CVPR. 7191-7200.","journal-title":"CVPR."},{"key":"e_1_3_2_1_28_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N Gomez \u0141ukasz Kaiser and Illia Polosukhin. 2017. Attention is All you Need. In NeurIPS I. Guyon U. Von Luxburg S. Bengio H. Wallach R. Fergus S. Vishwanathan and R. Garnett (Eds.). 5998-6008."},{"key":"e_1_3_2_1_29_1","first-page":"5018","article-title":"Deep Hashing Network for Unsupervised Domain Adaptation","author":"Venkateswara Hemanth","year":"2017","unstructured":"Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. 2017. Deep Hashing Network for Unsupervised Domain Adaptation. In CVPR. 5018-5027.","journal-title":"CVPR."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2023.12.022"},{"key":"e_1_3_2_1_31_1","first-page":"10704","article-title":"Cap4video: What can auxiliary captions do for text-video retrieval?","author":"Wu Wenhao","year":"2023","unstructured":"Wenhao Wu, Haipeng Luo, Bo Fang, Jingdong Wang, and Wanli Ouyang. 2023. Cap4video: What can auxiliary captions do for text-video retrieval?. In CVPR. 10704-10713.","journal-title":"CVPR."},{"key":"e_1_3_2_1_32_1","first-page":"6518","article-title":"A Collaborative Alignment Framework of Transferable Knowledge Extraction for Unsupervised Domain Adaptation","volume":"35","author":"Xie Binhui","year":"2023","unstructured":"Binhui Xie, Shuang Li, Fangrui Lv, Chi Harold Liu, Guoren Wang, and Dapeng Wu. 2023. 
A Collaborative Alignment Framework of Transferable Knowledge Extraction for Unsupervised Domain Adaptation. IEEE TKDE, Vol. 35, 7 (2023), 6518-6533.","journal-title":"IEEE TKDE"},{"key":"e_1_3_2_1_33_1","volume-title":"Cdtrans: Cross-domain transformer for unsupervised domain adaptation. In ICLR.","author":"Xu Tongkun","year":"2022","unstructured":"Tongkun Xu, Weihua Chen, Pichao Wang, Fan Wang, Hao Li, and Rong Jin. 2022. Cdtrans: Cross-domain transformer for unsupervised domain adaptation. In ICLR."},{"key":"e_1_3_2_1_34_1","first-page":"520","article-title":"Tvt: Transferable vision transformer for unsupervised domain adaptation","author":"Yang Jinyu","year":"2023","unstructured":"Jinyu Yang, Jingjing Liu, Ning Xu, and Junzhou Huang. 2023. Tvt: Transferable vision transformer for unsupervised domain adaptation. In WACV. 520-530.","journal-title":"WACV."},{"key":"e_1_3_2_1_35_1","first-page":"8559","article-title":"Adversarial multiple source domain adaptation","author":"Zhao Han","year":"2018","unstructured":"Han Zhao, Shanghang Zhang, Guanhang Wu, Jos\u00e9 MF Moura, Joao P Costeira, and Geoffrey J Gordon. 2018. Adversarial multiple source domain adaptation. In NeurIPS. 8559-8570.","journal-title":"NeurIPS."},{"key":"e_1_3_2_1_36_1","first-page":"16816","article-title":"Conditional prompt learning for vision-language models","author":"Zhou Kaiyang","year":"2022","unstructured":"Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. 2022a. Conditional prompt learning for vision-language models. In CVPR. 16816-16825.","journal-title":"CVPR."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3387116"},{"key":"e_1_3_2_1_39_1","volume-title":"Saswot: Real-time semantic segmentation architecture search without training. In AAAI","author":"Zhu Chendi","year":"2024","unstructured":"Chendi Zhu, Lujun Li, Yuli Wu, and Zhengxing Sun. 2024. 
Saswot: Real-time semantic segmentation architecture search without training. In AAAI, Vol. 38. 7722-7730."},{"key":"e_1_3_2_1_40_1","first-page":"3561","article-title":"Patch-mix transformer for unsupervised domain adaptation: A game perspective","author":"Zhu Jinjing","year":"2023","unstructured":"Jinjing Zhu, Haotian Bai, and Lin Wang. 2023. Patch-mix transformer for unsupervised domain adaptation: A game perspective. In CVPR. 3561-3571.","journal-title":"CVPR."},{"key":"e_1_3_2_1_41_1","volume-title":"Spot-the-difference self-supervised pre-training for anomaly detection and segmentation","author":"Zou Yang","unstructured":"Yang Zou, Jongheon Jeong, Latha Pemula, Dongqing Zhang, and Onkar Dabeer. 2022. Spot-the-difference self-supervised pre-training for anomaly detection and segmentation. In ECCV. Springer, 392-408."}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","location":"Dublin Ireland","acronym":"MM '25","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3754547","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T04:13:26Z","timestamp":1765340006000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3754547"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":41,"alternative-id":["10.1145\/3746027.3754547","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3754547","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}