{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:29:27Z","timestamp":1761895767807,"version":"build-2065373602"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11209970","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Boosting Audio-Visual Segmentation via Triple-Modalities Alignment"],"prefix":"10.1109","author":[{"given":"Yujian","family":"Lee","sequence":"first","affiliation":[{"name":"Beijing Normal-Hong Kong Baptist University,Guangdong Provincial\/Zhuhai Key Laboratory of IRADS,Zhuhai,China"}]},{"given":"Peng","family":"Gao","sequence":"additional","affiliation":[{"name":"Hong Kong Baptist University,Dept. Computer Science,Hong Kong, SAR"}]},{"given":"Zailong","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Wollongong,Dept. Computer Science,Wollongong,Australia"}]},{"given":"Wentao","family":"Fan","sequence":"additional","affiliation":[{"name":"Beijing Normal-Hong Kong Baptist University,Guangdong Provincial\/Zhuhai Key Laboratory of IRADS,Zhuhai,China"}]},{"given":"Guquan","family":"Jing","sequence":"additional","affiliation":[{"name":"Hong Kong Baptist University,Dept. Computer Science,Hong Kong, SAR"}]},{"given":"Yiyang","family":"Hu","sequence":"additional","affiliation":[{"name":"Hong Kong Baptist University,Dept. 
Computer Science,Hong Kong, SAR"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1007\/978-3-030-01246-5_27"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1007\/978-3-031-19836-6_22"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1007\/s11263-024-02261-x"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1609\/aaai.v38i6.28426"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.24963\/ijcai.2023\/97"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1145\/3581783.3611724"},{"year":"2023","author":"Liu","article-title":"Audio-aware query-enhanced transformer for audio-visual segmentation","key":"ref7"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1609\/aaai.v38i11.29104"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1109\/WACV57701.2024.00551"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1609\/aaai.v38i6.28465"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1609\/aaai.v38i6.28378"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1007\/978-3-031-72904-1_20"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1007\/978-3-031-73464-9_19"},{"key":"ref14","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1109\/TPAMI.2018.2798607"},{"key":"ref16","first-page":"2","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of naacL-HLT","volume":"1","author":"Ming-Wei Chang Kenton"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1109\/ISIT.2004.1365067"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/ICCV48922.2021.00986"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/ICASSP.2017.7952261"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.18653\/v1\/N18-2074"},{"key":"ref21","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1109\/CVPR52729.2023.01046"},{"year":"2024","author":"Yao","article-title":"Minicpm-v: A gpt-4v level mllm on your phone","key":"ref23"},{"key":"ref24","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International conference on machine learning","author":"Li"},{"year":"2024","author":"Ji","article-title":"Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling","key":"ref25"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1109\/CVPR52688.2022.00135"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo 
(ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11209970.pdf?arnumber=11209970","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:58:38Z","timestamp":1761890318000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11209970\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11209970","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}