{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T16:50:20Z","timestamp":1773247820300,"version":"3.50.1"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"StockFolio Inc., and the Ministry of Science and Information and Communication Technology (MSIT), South Korea, through the Information Technology Research Center (ITRC) Support Program","award":["IITP-2025-2020-0-01789"],"award-info":[{"award-number":["IITP-2025-2020-0-01789"]}]},{"name":"Artificial Intelligence Convergence Innovation Human Resources Development, Supervised by the Institute for Information and Communications Technology Planning and Evaluation","award":["IITP-2025-RS-2023-00254592"],"award-info":[{"award-number":["IITP-2025-RS-2023-00254592"]}]},{"name":"National Research Foundation of Korea (NRF) Grant funded by Korean Government","award":["RS-2025-00556289"],"award-info":[{"award-number":["RS-2025-00556289"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3561581","type":"journal-article","created":{"date-parts":[[2025,4,16]],"date-time":"2025-04-16T13:52:23Z","timestamp":1744811543000},"page":"71053-71065","source":"Crossref","is-referenced-by-count":2,"title":["LM-CLIP: Adapting Positive Asymmetric Loss for Long-Tailed Multi-Label Classification"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-5576-3222","authenticated-orcid":false,"given":"Christoph","family":"Timmermann","sequence":"first","affiliation":[{"name":"Graduate School of Computer Science and Artificial Intelligence, Dongguk University, Jung-gu, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9463-1194","authenticated-orcid":false,"given":"Seunghyeon","family":"Jung","sequence":"additional","affiliation":[{"name":"Graduate School of Computer Science and Artificial Intelligence, Dongguk University, Jung-gu, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-7880-0840","authenticated-orcid":false,"given":"Miso","family":"Kim","sequence":"additional","affiliation":[{"name":"Graduate School of Computer Science and Artificial Intelligence, Dongguk University, Jung-gu, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7432-4185","authenticated-orcid":false,"given":"Woojin","family":"Lee","sequence":"additional","affiliation":[{"name":"Graduate School of Computer Science and Artificial Intelligence, Dongguk University, Jung-gu, Seoul, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref2","article-title":"CLIP-adapter: Better vision-language models with feature adapters","author":"Gao","year":"2021","journal-title":"arXiv:2110.04544"},{"key":"ref3","article-title":"Tip-adapter: Training-free CLIP-adapter for better vision-language modeling","author":"Zhang","year":"2021","journal-title":"arXiv:2111.03930"},{"key":"ref4","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01069"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00130"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00505"},{"key":"ref8","first-page":"26342","article-title":"CHiLS: Zero-shot image classification with hierarchical label sets","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Novack"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00280"},{"key":"ref10","article-title":"Open vocabulary multi-label classification with dual-modal decoder on aligned visual-textual features","author":"Xu","year":"2022","journal-title":"arXiv:2208.09562"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3268118"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00015"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_10"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01484"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25244"},{"key":"ref17","article-title":"LMPT: Prompt tuning with class-specific embedding loss for long-tailed multi-label visual recognition","author":"Xia","year":"2023","journal-title":"arXiv:2305.04536"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.30534\/ijatcse\/2020\/175942020"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref21","first-page":"30569","article-title":"DualCoOp: Fast adaptation to multi-label recognition with limited annotations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sun"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00275"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2024.103224"},{"key":"ref24","article-title":"Multi-label classification for multi-temporal, multi-spatial coral reef condition monitoring using vision foundation model with adapter learning","author":"Shao","year":"2025","journal-title":"arXiv:2503.23012"},{"key":"ref25","article-title":"Fully fine-tuned CLIP models are efficient few-shot learners","author":"Liu","year":"2024","journal-title":"arXiv:2407.04003"},{"key":"ref26","article-title":"LOBG: Less overfitting for better generalization in vision-language model","author":"Ding","year":"2024","journal-title":"arXiv:2410.10247"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73414-4_24"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.199"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19806-9_5"},{"key":"ref30","article-title":"Decoupling representation and classifier for long-tailed recognition","author":"Kang","year":"2019","journal-title":"arXiv:1910.09217"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3697353"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref33","article-title":"A simple long-tailed recognition baseline via vision-language model","author":"Ma","year":"2021","journal-title":"arXiv:2111.14745"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0275-4"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"issue":"86","key":"ref37","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref38","article-title":"CLIP the bias: How useful is balancing data in multimodal learning?","author":"Alabdulmohsin","year":"2024","journal-title":"arXiv:2403.04547"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/10966869.pdf?arnumber=10966869","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,18]],"date-time":"2025-09-18T17:47:21Z","timestamp":1758217641000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10966869\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":38,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3561581","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}