{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T14:11:25Z","timestamp":1771337485782,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":60,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3680730","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:41Z","timestamp":1729925981000},"page":"8805-8814","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":21,"title":["Chain of Visual Perception: Harnessing Multimodal Large Language Models for Zero-shot Camouflaged Object Detection"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7359-1057","authenticated-orcid":false,"given":"Lv","family":"Tang","sequence":"first","affiliation":[{"name":"vivo Mobile Communication Co., Ltd, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1786-4943","authenticated-orcid":false,"given":"Peng-Tao","family":"Jiang","sequence":"additional","affiliation":[{"name":"vivo Mobile Communication Co., Ltd, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7197-8597","authenticated-orcid":false,"given":"Zhi-Hao","family":"Shen","sequence":"additional","affiliation":[{"name":"vivo Mobile Communication Co., Ltd, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-1175-5918","authenticated-orcid":false,"given":"Hao","family":"Zhang","sequence":"additional","affiliation":[{"name":"vivo Mobile Communication Co., Ltd, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-2596-0389","authenticated-orcid":false,"given":"Jin-Wei","family":"Chen","sequence":"additional","affiliation":[{"name":"vivo Mobile Communication Co., Ltd., Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7817-0665","authenticated-orcid":false,"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"vivo Mobile Communication Co., Ltd., Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"5708","article-title":"Rethinking Camouflaged Object Detection: Models and Datasets","volume":"32","author":"Bi Hongbo","year":"2022","unstructured":"Hongbo Bi, Cong Zhang, Kang Wang, Jinghui Tong, and Feng Zheng. 2022. Rethinking Camouflaged Object Detection: Models and Datasets. IEEE TCSVT, Vol. 32, 9 (2022), 5708--5724.","journal-title":"IEEE TCSVT"},{"key":"e_1_3_2_1_2_1","unstructured":"Tom B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel M. Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners. In NeurIPS."},{"key":"e_1_3_2_1_3_1","volume-title":"Towards Generic Anomaly Detection and Understanding: Large-scale Visual-linguistic Model (GPT-4V) Takes the Lead. CoRR","author":"Cao Yunkang","year":"2023","unstructured":"Yunkang Cao, Xiaohao Xu, Chen Sun, Xiaonan Huang, and Weiming Shen. 2023. Towards Generic Anomaly Detection and Understanding: Large-scale Visual-linguistic Model (GPT-4V) Takes the Lead. CoRR, Vol. 
abs\/2311.02782 (2023)."},{"key":"e_1_3_2_1_4_1","volume-title":"Shikra: Unleashing Multimodal LLM's Referential Dialogue Magic. CoRR","author":"Chen Keqin","year":"2023","unstructured":"Keqin Chen, Zhao Zhang, Weili Zeng, Richong Zhang, Feng Zhu, and Rui Zhao. 2023. Shikra: Unleashing Multimodal LLM's Referential Dialogue Magic. CoRR, Vol. abs\/2306.15195 (2023)."},{"key":"e_1_3_2_1_5_1","volume-title":"Implicit Motion Handling for Video Camouflaged Object Detection","author":"Cheng Xuelian","unstructured":"Xuelian Cheng, Huan Xiong, Deng-Ping Fan, Yiran Zhong, Mehrtash Harandi, Tom Drummond, and Zongyuan Ge. 2022. Implicit Motion Handling for Video Camouflaged Object Detection. In CVPR. IEEE, 13854--13863."},{"key":"e_1_3_2_1_6_1","volume-title":"Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven C. H. Hoi.","author":"Dai Wenliang","year":"2023","unstructured":"Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven C. H. Hoi. 2023. InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning. CoRR, Vol. abs\/2305.06500 (2023)."},{"key":"e_1_3_2_1_7_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT (1)","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT (1). Association for Computational Linguistics, 4171--4186."},{"key":"e_1_3_2_1_8_1","volume-title":"A survey for in-context learning. CoRR","author":"Dong Qingxiu","year":"2022","unstructured":"Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Zhiyong Wu, Baobao Chang, Xu Sun, Jingjing Xu, and Zhifang Sui. 2022. A survey for in-context learning. CoRR, Vol. 
abs\/2301.00234 (2022)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.487"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3085766"},{"key":"e_1_3_2_1_11_1","volume-title":"Camouflaged Object Detection","author":"Fan Deng-Ping","unstructured":"Deng-Ping Fan, Ge-Peng Ji, Guolei Sun, Ming-Ming Cheng, Jianbing Shen, and Ling Shao. 2020. Camouflaged Object Detection. In CVPR. IEEE, 2774--2784."},{"key":"e_1_3_2_1_12_1","unstructured":"Yao Fu Hao Peng Ashish Sabharwal Peter Clark and Tushar Khot. 2023. Complexity-Based Prompting for Multi-step Reasoning. In ICLR. OpenReview.net."},{"key":"e_1_3_2_1_13_1","volume-title":"Lau","author":"He Ruozhen","year":"2023","unstructured":"Ruozhen He, Qihua Dong, Jiaying Lin, and Rynson W. H. Lau. 2023. Weakly-Supervised Camouflaged Object Detection with Scribble Annotations. In AAAI. AAAI Press, 781--789."},{"key":"e_1_3_2_1_14_1","volume-title":"Relax Image-Specific Prompt Requirement in SAM: A Single Generic Prompt for Segmenting Camouflaged Objects","author":"Hu Jian","unstructured":"Jian Hu, Jiayi Lin, Shaogang Gong, and Weitong Cai. 2024. Relax Image-Specific Prompt Requirement in SAM: A Single Generic Prompt for Segmenting Camouflaged Objects. In AAAI. AAAI Press, 12511--12518."},{"key":"e_1_3_2_1_15_1","volume-title":"High-Resolution Iterative Feedback Network for Camouflaged Object Detection","author":"Hu Xiaobin","unstructured":"Xiaobin Hu, Shuo Wang, Xuebin Qin, Hang Dai, Wenqi Ren, Donghao Luo, Ying Tai, and Ling Shao. 2023. High-Resolution Iterative Feedback Network for Camouflaged Object Detection. In AAAI. 
AAAI Press, 881--889."},{"key":"e_1_3_2_1_16_1","volume-title":"Barun Patra, Qiang Liu, Kriti Aggarwal, Zewen Chi, Johan Bjorck, Vishrav Chaudhary, Subhojit Som, Xia Song, and Furu Wei.","author":"Huang Shaohan","year":"2023","unstructured":"Shaohan Huang, Li Dong, Wenhui Wang, Yaru Hao, Saksham Singhal, Shuming Ma, Tengchao Lv, Lei Cui, Owais Khan Mohammed, Barun Patra, Qiang Liu, Kriti Aggarwal, Zewen Chi, Johan Bjorck, Vishrav Chaudhary, Subhojit Som, Xia Song, and Furu Wei. 2023. Language Is Not All You Need: Aligning Perception with Language Models. CoRR, Vol. abs\/2302.14045 (2023)."},{"key":"e_1_3_2_1_17_1","volume-title":"Feature Shrinkage Pyramid for Camouflaged Object Detection with Transformers","author":"Huang Zhou","unstructured":"Zhou Huang, Hang Dai, Tian-Zhu Xiang, Shuo Wang, Huai-Xin Chen, Jie Qin, and Huan Xiong. 2023. Feature Shrinkage Pyramid for Camouflaged Object Detection with Transformers. In CVPR. IEEE, 5557--5566."},{"key":"e_1_3_2_1_18_1","first-page":"108414","article-title":"Fast Camouflaged Object Detection via Edge-based Reversible Re-calibration Network","volume":"123","author":"Ji Ge-Peng","year":"2022","unstructured":"Ge-Peng Ji, Lei Zhu, Mingchen Zhuge, and Keren Fu. 2022. Fast Camouflaged Object Detection via Edge-based Reversible Re-calibration Network. PR, Vol. 123 (2022), 108414.","journal-title":"PR"},{"key":"e_1_3_2_1_19_1","first-page":"92","article-title":"Deep gradient learning for efficient camouflaged object detection","volume":"20","author":"Ji Ge-Peng","year":"2023","unstructured":"Ge-Peng Ji, Deng-Ping Fan, Yu-Cheng Chou, Dengxin Dai, Alexander Liniger, and Luc Van Gool. 2023. Deep gradient learning for efficient camouflaged object detection. MIR, Vol. 20, 1 (2023), 92--108.","journal-title":"MIR"},{"key":"e_1_3_2_1_20_1","volume-title":"Segment Anything in High Quality. CoRR","author":"Ke Lei","year":"2023","unstructured":"Lei Ke, Mingqiao Ye, Martin Danelljan, Yifan Liu, Yu-Wing Tai, Chi-Keung Tang, and Fisher Yu. 
2023. Segment Anything in High Quality. CoRR, Vol. abs\/2306.01567 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Girshick","author":"Kirillov Alexander","year":"2023","unstructured":"Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chlo\u00e9 Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Doll\u00e1r, and Ross B. Girshick. 2023. Segment Anything. CoRR, Vol. abs\/2304.02643 (2023)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","unstructured":"Alexander Kirillov Eric Mintun Nikhila Ravi Hanzi Mao Chloe Rolland Laura Gustafson Tete Xiao Spencer Whitehead Alexander C Berg Wan-Yen Lo et al. 2023. Segment Anything. arXiv preprint arXiv:2304.02643 (2023).","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"e_1_3_2_1_23_1","volume-title":"Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa.","author":"Kojima Takeshi","year":"2022","unstructured":"Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large Language Models are Zero-Shot Reasoners. In NeurIPS."},{"key":"e_1_3_2_1_24_1","volume-title":"LISA: Reasoning Segmentation via Large Language Model. CoRR","author":"Lai Xin","year":"2023","unstructured":"Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. 2023. LISA: Reasoning Segmentation via Large Language Model. CoRR, Vol. abs\/2308.00692 (2023)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2019.04.006"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","unstructured":"Bo Li Zhengxing Sun Quan Wang and Qian Li. 2019. Co-saliency Detection Based on Hierarchical Consistency. In ACM Multimedia. ACM 1392--1400.","DOI":"10.1145\/3343031.3351016"},{"key":"e_1_3_2_1_27_1","volume-title":"2023 d. Otter: A Multi-Modal Model with In-Context Instruction Tuning. CoRR","author":"Li Bo","year":"2023","unstructured":"Bo Li, Yuanhan Zhang, Liangyu Chen, Jinghao Wang, Jingkang Yang, and Ziwei Liu. 2023 d. 
Otter: A Multi-Modal Model with In-Context Instruction Tuning. CoRR, Vol. abs\/2305.03726 (2023)."},{"key":"e_1_3_2_1_28_1","first-page":"5126","article-title":"Zero-Shot Camouflaged Object Detection","volume":"32","author":"Li Haoran","year":"2023","unstructured":"Haoran Li, Chun-Mei Feng, Yong Xu, Tao Zhou, Lina Yao, and Xiaojun Chang. 2023. Zero-Shot Camouflaged Object Detection. IEEE TIP., Vol. 32 (2023), 5126--5137.","journal-title":"IEEE TIP."},{"key":"e_1_3_2_1_29_1","volume-title":"ICML","volume":"202","author":"Li Junnan","year":"2023","unstructured":"Junnan Li, Dongxu Li, Silvio Savarese, and Steven C. H. Hoi. 2023. BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models. In ICML, Vol. 202. PMLR, 19730--19742."},{"key":"e_1_3_2_1_30_1","volume-title":"Hoi","author":"Li Junnan","year":"2022","unstructured":"Junnan Li, Dongxu Li, Caiming Xiong, and Steven C. H. Hoi. 2022. BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. In ICML, Vol. 162. PMLR, 12888--12900."},{"key":"e_1_3_2_1_31_1","volume-title":"CLIP Surgery for Better Explainability with Enhancement in Open-Vocabulary Tasks. CoRR","author":"Li Yi","year":"2023","unstructured":"Yi Li, Hualiang Wang, Yiqun Duan, and Xiaomeng Li. 2023. CLIP Surgery for Better Explainability with Enhancement in Open-Vocabulary Tasks. CoRR, Vol. abs\/2304.05653 (2023)."},{"key":"e_1_3_2_1_32_1","volume-title":"Visual Instruction Tuning. CoRR","author":"Liu Haotian","year":"2023","unstructured":"Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023. Visual Instruction Tuning. CoRR, Vol. abs\/2304.08485 (2023)."},{"key":"e_1_3_2_1_33_1","volume-title":"Matcher: Segment Anything with One Shot Using All-Purpose Feature Matching. CoRR","author":"Liu Yang","year":"2023","unstructured":"Yang Liu, Muzhi Zhu, Hengtao Li, Hao Chen, Xinlong Wang, and Chunhua Shen. 2023. 
Matcher: Segment Anything with One Shot Using All-Purpose Feature Matching. CoRR, Vol. abs\/2305.13310 (2023)."},{"key":"e_1_3_2_1_34_1","unstructured":"Jiasen Lu Dhruv Batra Devi Parikh and Stefan Lee. 2019. ViLBERT: Pretraining Task-Agnostic Visiolinguistic Representations for Vision-and-Language Tasks. In NeurIPS. 13--23."},{"key":"e_1_3_2_1_35_1","volume-title":"Segment and Rank the Camouflaged Objects","author":"Lv Yunqiu","unstructured":"Yunqiu Lv, Jing Zhang, Yuchao Dai, Aixuan Li, Bowen Liu, Nick Barnes, and Deng-Ping Fan. 2021. Simultaneously Localize, Segment and Rank the Camouflaged Objects. In CVPR. IEEE, 11591--11601."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2304.12306"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1142\/S021946782050028X"},{"key":"e_1_3_2_1_38_1","unstructured":"OpenAI. 2023. GPT-4V(ision) System Card. In https:\/\/openai.com."},{"key":"e_1_3_2_1_39_1","volume-title":"DINOv2: Learning Robust Visual Features without Supervision. CoRR","author":"Oquab Maxime","year":"2023","unstructured":"Maxime Oquab, Timoth\u00e9e Darcet, Th\u00e9o Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Mahmoud Assran, Nicolas Ballas, Wojciech Galuba, Russell Howes, Po-Yao Huang, Shang-Wen Li, Ishan Misra, Michael G. Rabbat, Vasu Sharma, Gabriel Synnaeve, Hu Xu, Herv\u00e9 J\u00e9gou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. 2023. DINOv2: Learning Robust Visual Features without Supervision. CoRR, Vol. abs\/2304.07193 (2023)."},{"key":"e_1_3_2_1_40_1","volume-title":"Zoom In and Out: A Mixed-scale Triplet Network for Camouflaged Object Detection","author":"Pang Youwei","unstructured":"Youwei Pang, Xiaoqi Zhao, Tian-Zhu Xiang, Lihe Zhang, and Huchuan Lu. 2022. Zoom In and Out: A Mixed-scale Triplet Network for Camouflaged Object Detection. In CVPR. 
IEEE, 2150--2160."},{"key":"e_1_3_2_1_41_1","volume-title":"Open-Vocabulary Camouflaged Object Segmentation. CoRR","author":"Pang Youwei","year":"2023","unstructured":"Youwei Pang, Xiaoqi Zhao, Jiaming Zuo, Lihe Zhang, and Huchuan Lu. 2023. Open-Vocabulary Camouflaged Object Segmentation. CoRR, Vol. abs\/2311.11241 (2023)."},{"key":"e_1_3_2_1_42_1","volume-title":"Kosmos-2: Grounding Multimodal Large Language Models to the World. CoRR","author":"Peng Zhiliang","year":"2023","unstructured":"Zhiliang Peng, Wenhui Wang, Li Dong, Yaru Hao, Shaohan Huang, Shuming Ma, and Furu Wei. 2023. Kosmos-2: Grounding Multimodal Large Language Models to the World. CoRR, Vol. abs\/2306.14824 (2023)."},{"key":"e_1_3_2_1_43_1","volume-title":"DetGPT: Detect What You Need via Reasoning. CoRR","author":"Pi Renjie","year":"2023","unstructured":"Renjie Pi, Jiahui Gao, Shizhe Diao, Rui Pan, Hanze Dong, Jipeng Zhang, Lewei Yao, Jianhua Han, Hang Xu, Lingpeng Kong, and Tong Zhang. 2023. DetGPT: Detect What You Need via Reasoning. CoRR, Vol. abs\/2305.14167 (2023)."},{"key":"e_1_3_2_1_44_1","volume-title":"ClassWise-SAM-Adapter: Parameter Efficient Fine-tuning Adapts Segment Anything to SAR Domain for Semantic Segmentation. CoRR","author":"Pu Xinyang","year":"2024","unstructured":"Xinyang Pu, Hecheng Jia, Linghao Zheng, Feng Wang, and Feng Xu. 2024. ClassWise-SAM-Adapter: Parameter Efficient Fine-tuning Adapts Segment Anything to SAR Domain for Semantic Segmentation. CoRR, Vol. abs\/2401.02326 (2024)."},{"key":"e_1_3_2_1_45_1","volume-title":"ICML","volume":"139","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. In ICML, Vol. 139. 
PMLR, 8748--8763."},{"key":"e_1_3_2_1_46_1","volume-title":"Warp Consistency for Unsupervised Learning of Dense Correspondences","author":"Truong Prune","unstructured":"Prune Truong, Martin Danelljan, Fisher Yu, and Luc Van Gool. 2021. Warp Consistency for Unsupervised Learning of Dense Correspondences. In ICCV. IEEE."},{"key":"e_1_3_2_1_47_1","volume-title":"Quoc V. Le, and Denny Zhou.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. In NeurIPS."},{"key":"e_1_3_2_1_48_1","volume-title":"Self-supervised Video Object Segmentation by Motion Grouping","author":"Yang Charig","unstructured":"Charig Yang, Hala Lamdouar, Erika Lu, Andrew Zisserman, and Weidi Xie. 2021. Self-supervised Video Object Segmentation by Motion Grouping. In ICCV. IEEE, 7157--7168."},{"key":"e_1_3_2_1_49_1","volume-title":"Uncertainty-Guided Transformer Reasoning for Camouflaged Object Detection","author":"Yang Fan","unstructured":"Fan Yang, Qiang Zhai, Xin Li, Rui Huang, Ao Luo, Hong Cheng, and Deng-Ping Fan. 2021. Uncertainty-Guided Transformer Reasoning for Camouflaged Object Detection. In ICCV. IEEE, 4126--4135."},{"key":"e_1_3_2_1_50_1","volume-title":"Mutual Graph Learning for Camouflaged Object Detection","author":"Zhai Qiang","unstructured":"Qiang Zhai, Xin Li, Fan Yang, Chenglizhao Chen, Hong Cheng, and Deng-Ping Fan. 2021. Mutual Graph Learning for Camouflaged Object Detection. In CVPR. IEEE, 12997--13007."},{"key":"e_1_3_2_1_51_1","first-page":"103450","article-title":"Camouflaged object detection via Neighbor Connection and Hierarchical Information Transfer","volume":"221","author":"Zhang Cong","year":"2022","unstructured":"Cong Zhang, Kang Wang, Hongbo Bi, Ziqi Liu, and Lina Yang. 2022. Camouflaged object detection via Neighbor Connection and Hierarchical Information Transfer. CVIU., Vol. 
221 (2022), 103450.","journal-title":"CVIU."},{"key":"e_1_3_2_1_52_1","volume-title":"Customized Segment Anything Model for Medical Image Segmentation. CoRR","author":"Zhang Kaidong","year":"2023","unstructured":"Kaidong Zhang and Dong Liu. 2023. Customized Segment Anything Model for Medical Image Segmentation. CoRR, Vol. abs\/2304.13785 (2023)."},{"key":"e_1_3_2_1_53_1","volume-title":"TPRNet: camouflaged object detection via transformer-induced progressive refinement network. The Visual Computer","author":"Zhang Qiao","year":"2022","unstructured":"Qiao Zhang, Yanliang Ge, Cong Zhang, and Hongbo Bi. 2022. TPRNet: camouflaged object detection via transformer-induced progressive refinement network. The Visual Computer (2022), 1--15."},{"key":"e_1_3_2_1_54_1","volume-title":"LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention. CoRR","author":"Zhang Renrui","year":"2023","unstructured":"Renrui Zhang, Jiaming Han, Aojun Zhou, Xiangfei Hu, Shilin Yan, Pan Lu, Hongsheng Li, Peng Gao, and Yu Qiao. 2023. LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention. CoRR, Vol. abs\/2303.16199 (2023)."},{"key":"e_1_3_2_1_55_1","volume-title":"Personalize Segment Anything Model with One Shot. CoRR","author":"Zhang Renrui","year":"2023","unstructured":"Renrui Zhang, Zhengkai Jiang, Ziyu Guo, Shilin Yan, Junting Pan, Hao Dong, Peng Gao, and Hongsheng Li. 2023. Personalize Segment Anything Model with One Shot. CoRR, Vol. abs\/2305.03048 (2023)."},{"key":"e_1_3_2_1_56_1","volume-title":"GPT4RoI: Instruction Tuning Large Language Model on Region-of-Interest. CoRR","author":"Zhang Shilong","year":"2023","unstructured":"Shilong Zhang, Peize Sun, Shoufa Chen, Min Xiao, Wenqi Shao, Wenwei Zhang, Kai Chen, and Ping Luo. 2023. GPT4RoI: Instruction Tuning Large Language Model on Region-of-Interest. CoRR, Vol. 
abs\/2307.03601 (2023)."},{"key":"e_1_3_2_1_57_1","volume-title":"ChatSpot: Bootstrapping Multimodal LLMs via Precise Referring Instruction Tuning. CoRR","author":"Zhao Liang","year":"2023","unstructured":"Liang Zhao, En Yu, Zheng Ge, Jinrong Yang, Haoran Wei, Hongyu Zhou, Jianjian Sun, Yuang Peng, Runpei Dong, Chunrui Han, and Xiangyu Zhang. 2023. ChatSpot: Bootstrapping Multimodal LLMs via Precise Referring Instruction Tuning. CoRR, Vol. abs\/2307.09474 (2023)."},{"key":"e_1_3_2_1_58_1","volume-title":"Detecting Camouflaged Object in Frequency Domain","author":"Zhong Yijie","unstructured":"Yijie Zhong, Bo Li, Lv Tang, Senyun Kuang, Shuang Wu, and Shouhong Ding. 2022. Detecting Camouflaged Object in Frequency Domain. In CVPR. IEEE, 4494--4503."},{"key":"e_1_3_2_1_59_1","volume-title":"MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models. CoRR","author":"Zhu Deyao","year":"2023","unstructured":"Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. 2023. MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models. CoRR, Vol. abs\/2304.10592 (2023)."},{"key":"e_1_3_2_1_60_1","first-page":"3738","article-title":"Salient Object Detection via Integrity Learning","volume":"45","author":"Zhuge Mingchen","year":"2023","unstructured":"Mingchen Zhuge, Deng-Ping Fan, Nian Liu, Dingwen Zhang, Dong Xu, and Ling Shao. 2023. Salient Object Detection via Integrity Learning. IEEE TPAMI., Vol. 
45, 3 (2023), 3738--3752.","journal-title":"IEEE TPAMI."}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680730","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3680730","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:06:24Z","timestamp":1750291584000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680730"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":60,"alternative-id":["10.1145\/3664647.3680730","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3680730","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}