{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:19:26Z","timestamp":1775578766379,"version":"3.50.1"},"reference-count":88,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"Shanghai Natural Science Foundation","doi-asserted-by":"publisher","award":["23ZR1402900"],"award-info":[{"award-number":["23ZR1402900"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62071127"],"award-info":[{"award-number":["62071127"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022ZD0160101"],"award-info":[{"award-number":["2022ZD0160101"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1109\/tcsvt.2025.3544680","type":"journal-article","created":{"date-parts":[[2025,2,24]],"date-time":"2025-02-24T13:44:06Z","timestamp":1740404646000},"page":"7925-7939","source":"Crossref","is-referenced-by-count":2,"title":["Dynamic Model Merging With Mixture of Weights"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6272-2792","authenticated-orcid":false,"given":"Peng","family":"Ye","sequence":"first","affiliation":[{"name":"School of Information Science and Technology, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0100-8488","authenticated-orcid":false,"given":"Chenyu","family":"Huang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1357-556X","authenticated-orcid":false,"given":"Mingzhu","family":"Shen","sequence":"additional","affiliation":[{"name":"Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0779-9818","authenticated-orcid":false,"given":"Tao","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7190-4983","authenticated-orcid":false,"given":"Yongqi","family":"Huang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9163-2761","authenticated-orcid":false,"given":"Wanli","family":"Ouyang","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCCSP52374.2021.9465499"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2023.108941"},{"key":"ref3","volume-title":"Intel Image Classification","author":"Bansal","year":"2019"},{"key":"ref4","article-title":"On the opportunities and risks of foundation models","author":"Bommasani","year":"2021","journal-title":"arXiv:2108.07258"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00026"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S17-2001"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2017.2675998"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref10","first-page":"215","article-title":"An analysis of single-layer networks in unsupervised feature learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist.","volume":"15","author":"Coates"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00020"},{"key":"ref12","volume-title":"Dogs vs. Cats","author":"Cukierski","year":"2013"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref14","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref15","article-title":"Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping","author":"Dodge","year":"2020","journal-title":"arXiv:2002.06305"},{"key":"ref16","first-page":"9","article-title":"Automatically constructing a corpus of sentential paraphrases","volume-title":"Proc. 3rd Int. Workshop Paraphrasing (IWP)","author":"Dolan"},{"key":"ref17","article-title":"ColD fusion: Collaborative descent for distributed multitask finetuning","author":"Don-Yehiya","year":"2022","journal-title":"arXiv:2212.01378"},{"key":"ref18","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.208"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1098\/rsta.1922.0009"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3115\/1654536.1654538"},{"key":"ref22","article-title":"In search of lost domain generalization","author":"Gulrajani","year":"2020","journal-title":"arXiv:2007.01434"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2019.2918242"},{"key":"ref24","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hu"},{"key":"ref25","first-page":"122741","article-title":"EMR-merging: Tuning-free high-performance model merging","volume-title":"Proc. 38th Annu. Conf. Neural Inf. Process. Syst.","author":"Huang"},{"key":"ref26","article-title":"Editing models with task arithmetic","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Ilharco"},{"key":"ref27","volume-title":"First quora dataset release: Question pairs","author":"Iyer","year":"2017"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.79"},{"key":"ref29","article-title":"Dataless knowledge fusion by merging weights of language models","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Jin"},{"key":"ref30","article-title":"Sparse upcycling: Training mixture-of-experts from dense checkpoints","author":"Komatsuzaki","year":"2022","journal-title":"arXiv:2212.05055"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2013.77"},{"key":"ref32","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref33","first-page":"1","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"25","author":"Krizhevsky"},{"key":"ref34","volume-title":"Bean Disease Dataset","year":"2020"},{"key":"ref35","volume-title":"The MNIST Database of Handwritten Digits","author":"LeCun","year":"1998"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.591"},{"key":"ref37","first-page":"1950","article-title":"Few-shot parameter-efficient fine-tuning is better and cheaper than in-context learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Liu"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref39","first-page":"78905","article-title":"Twin-merging: Dynamic integration of modular expertise in model merging","volume-title":"Proc. 38th Annu. Conf. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref40","volume-title":"Torchvision: Pytorch\u2019s Computer Vision Library"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-012-9338-y"},{"key":"ref42","first-page":"17703","article-title":"Merging models with Fisher-weighted averaging","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Matena"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00081"},{"key":"ref44","article-title":"Soft merging of experts with adaptive routing","author":"Muqeeth","year":"2023","journal-title":"arXiv:2306.03745"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3327605"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3260310"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3289170"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2105.07581"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3083187.3083212"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3304724"},{"key":"ref51","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"issue":"8","key":"ref52","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1264"},{"key":"ref54","first-page":"28656","article-title":"Model ratatouille: Recycling diverse models for out-of-distribution generalization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ram\u00e9"},{"key":"ref55","first-page":"10821","article-title":"Diverse weight averaging for out-of-distribution generalization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ram\u00e9"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref58","article-title":"Efficient and effective weight-ensembling mixture of experts for multi-task model merging","author":"Shen","year":"2024","journal-title":"arXiv:2410.21804"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D13-1170"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2011.6033395"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.195"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.105"},{"key":"ref63","article-title":"SMILE: Zero-shot sparse mixture of low-rank experts construction from pre-trained foundation models","author":"Tang","year":"2024","journal-title":"arXiv:2408.10174"},{"key":"ref64","first-page":"47778","article-title":"Merging multi-task models via weight-ensembling mixture of experts","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Tang"},{"issue":"86","key":"ref65","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3054719"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.572"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00290"},{"key":"ref71","volume-title":"PyTorch Image Models","author":"Wightman","year":"2019"},{"key":"ref72","article-title":"A broad-coverage challenge corpus for sentence understanding through inference","author":"Williams","year":"2017","journal-title":"arXiv:1704.05426"},{"key":"ref73","article-title":"HuggingFace\u2019s transformers: State-of-the-art natural language processing","author":"Wolf","year":"2019","journal-title":"arXiv:1910.03771"},{"key":"ref74","first-page":"23965","article-title":"Model soups: Averaging weights of multiple fine-tuned models improves accuracy without increasing inference time","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","author":"Wortsman"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1029\/2020EA001604"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2010.5539970"},{"key":"ref77","article-title":"A survey on model MoErging: Recycling and routing among specialized experts for collaborative learning","author":"Yadav","year":"2024","journal-title":"arXiv:2408.07057"},{"key":"ref78","first-page":"7093","article-title":"TIES-merging: Resolving interference when merging models","volume-title":"Proc. Thirty-seventh Conf. Neural Inf. Process. Syst.","author":"Yadav"},{"key":"ref79","article-title":"Representation surgery for multi-task model merging","author":"Yang","year":"2024","journal-title":"arXiv:2402.02705"},{"key":"ref80","article-title":"SurgeryV2: Bridging the gap between model merging and multi-task learning with deep representation surgery","author":"Yang","year":"2024","journal-title":"arXiv:2410.14389"},{"key":"ref81","article-title":"AdaMerging: Adaptive model merging for multi-task learning","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Yang"},{"key":"ref82","first-page":"57755","article-title":"Language models are super mario: Absorbing abilities from homologous models as a free lunch","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Yu"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.2118\/18761-MS"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00893"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3336371"},{"key":"ref86","article-title":"BAM! Just like that: Simple and efficient parameter upcycling for mixture of experts","author":"Zhang","year":"2024","journal-title":"arXiv:2408.08274"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3070203"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.890"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11114024\/10900479.pdf?arnumber=10900479","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T18:40:51Z","timestamp":1767638451000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10900479\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8]]},"references-count":88,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3544680","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,8]]}}}