{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T13:27:05Z","timestamp":1750339625990,"version":"3.37.3"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006190","name":"Research and Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006190","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100017610","name":"Shenzhen Science and Technology Innovation Program","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100017610","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10448111","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"12421-12425","source":"Crossref","is-referenced-by-count":1,"title":["DialCLIP: Empowering Clip As Multi-Modal Dialog Retriever"],"prefix":"10.1109","author":[{"given":"Zhichao","family":"Yin","sequence":"first","affiliation":[{"name":"University of Science and Technology of China"}]},{"given":"Binyuan","family":"Hui","sequence":"additional","affiliation":[{"name":"DAMO Academy,Alibaba Group"}]},{"given":"Min","family":"Yang","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology,Chinese Academy of Sciences"}]},{"given":"Fei","family":"Huang","sequence":"additional","affiliation":[{"name":"DAMO Academy,Alibaba Group"}]},{"given":"Yongbin","family":"Li","sequence":"additional","affiliation":[{"name":"DAMO Academy,Alibaba Group"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.398"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.204"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.219"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.479"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.405"},{"journal-title":"ArXiv","article-title":"Gpt-4 technical report","year":"2023","key":"ref6"},{"key":"ref7","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref8","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume":"35","author":"Alayrac","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","first-page":"23318","article-title":"OFA: unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","volume-title":"ICML. 2022, vol. 
162 of Proceedings of Machine Learning Research","author":"Wang"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"ref11","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2023.08.012"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3604613"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.95"},{"key":"ref16","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International Conference on Machine Learning","author":"Li"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"ref18","first-page":"32897","article-title":"Vlmo: Unified vision-language pre-training with mixture-of-modalityexperts","volume":"35","author":"Bao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.749"},{"article-title":"Scaling instruction-finetuned language models","year":"2022","author":"Chung","key":"ref20"},{"article-title":"Decoupled weight decay regularization","year":"2017","author":"Loshchilov","key":"ref21"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10448111.pdf?arnumber=10448111","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T06:47:09Z","timestamp":1722581229000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10448111\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10448111","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}
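The record above is a Crossref work-metadata response for the DialCLIP ICASSP 2024 paper (DOI 10.1109/icassp48485.2024.10448111). A minimal sketch of how such a record can be retrieved and its citation fields read, assuming Python 3 with the requests package installed and using Crossref's public REST endpoint at api.crossref.org (the field names match the record shown above):

    import requests

    # DOI taken from the record above.
    doi = "10.1109/icassp48485.2024.10448111"

    # Crossref's public works endpoint returns the same structure shown above:
    # {"status": ..., "message-type": "work", "message": {...}}.
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    work = resp.json()["message"]

    # "title" and "container-title" are lists; each "author" entry carries given/family names.
    title = work["title"][0]
    venue = work["container-title"][0]
    authors = ", ".join(f"{a.get('given', '')} {a['family']}".strip() for a in work.get("author", []))

    print(f'{authors}. "{title}". {venue}, pp. {work.get("page", "n/a")}.')

The same fields can also be read directly from the JSON above without any network call, e.g. by passing the raw text to json.loads and indexing into the "message" object.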