{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:23:05Z","timestamp":1761895385464,"version":"build-2065373602"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006190","name":"Research and Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006190","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11210183","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["DRMOE: Towards Better Mixture of Experts via Dual Routing Strategy"],"prefix":"10.1109","author":[{"given":"Haiyang","family":"Liu","sequence":"first","affiliation":[{"name":"South China Agriculture University,Guangzhou,China"}]},{"given":"Shaojian","family":"Qiu","sequence":"additional","affiliation":[{"name":"South China Agriculture University,Guangzhou,China"}]},{"given":"Hai","family":"Lin","sequence":"additional","affiliation":[{"name":"China Unicom Software Research Institute Co., Ltd.,Guangzhou,China"}]},{"given":"Yingjie","family":"Kuang","sequence":"additional","affiliation":[{"name":"South China Agriculture University,Guangzhou,China"}]},{"given":"Shunpeng","family":"Li","sequence":"additional","affiliation":[{"name":"South China Agriculture University,Guangzhou,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2023.08.012"},{"key":"ref2","article-title":"Language models are few-shot learners","volume":"1","author":"Mann","year":"2020"},{"article-title":"Bloom: A 176b-parameter open-access multilingual language model","year":"2023","author":"Le Scao","key":"ref3"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref4"},{"article-title":"Parameter-efficient fine-tuning for large models: A comprehensive survey","year":"2024","author":"Han","key":"ref5"},{"key":"ref6","first-page":"2790","article-title":"Parameter-efficient transfer learning for nlp","volume-title":"International conference on machine learning","author":"Houlsby"},{"article-title":"Lora: Low-rank adaptation of large language models","year":"2021","author":"Hu","key":"ref7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.388"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acllong.353"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3663363"},{"article-title":"Higher layers need more lora experts","year":"2024","author":"Gao","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657722"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.36227\/techrxiv.172055626.64129172\/v1","article-title":"A survey on mixture of 
experts","author":"Cai","year":"2024"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1017\/9781139061773.020"},{"article-title":"Taming sparsely activated transformer with stochastic experts","year":"2021","author":"Zuo","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICME57554.2024.10688018"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IPDPSW55747.2022.00171"},{"article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","year":"2017","author":"Shazeer","key":"ref18"},{"journal-title":"Available at SSRN 4685921","article-title":"Promptcblue: A chinese prompt tuning benchmark for the medical domain","author":"Zhu","key":"ref19"},{"key":"ref20","first-page":"2","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of naacL-HLT","volume":"1","author":"Kenton"},{"key":"ref21","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"article-title":"Huatuo: Tuning llama model with chinese medical knowledge","year":"2023","author":"Wang","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.64"},{"article-title":"The llama 3 herd of models","year":"2024","author":"Dubey","key":"ref24"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11210183.pdf?arnumber=11210183","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:49:01Z","timestamp":1761889741000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11210183\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11210183","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}