{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T17:59:46Z","timestamp":1775325586108,"version":"3.50.1"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,30]]},"DOI":"10.1109\/ijcnn60899.2024.10650737","type":"proceedings-article","created":{"date-parts":[[2024,9,9]],"date-time":"2024-09-09T17:35:05Z","timestamp":1725903305000},"page":"1-8","source":"Crossref","is-referenced-by-count":3,"title":["Efficient Routing in Sparse Mixture-of-Experts"],"prefix":"10.1109","author":[{"given":"Masoumeh","family":"Zareapoor","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,Shanghai,China"}]},{"given":"Pourya","family":"Shamsolmoali","sequence":"additional","affiliation":[{"name":"Queen’s University Belfast,Belfast,UK"}]},{"given":"Fateme","family":"Vesaghati","sequence":"additional","affiliation":[{"name":"Azad University,Tehran,Iran"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2018"},{"key":"ref2","article-title":"From sparse to soft mixtures of experts","author":"Puigcerver","year":"2023"},{"key":"ref3","first-page":"8583","article-title":"Scaling vision with sparse mixture of experts","volume":"34","author":"Riquelme","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref4","first-page":"7103","article-title":"Mixture-of-experts with expert choice routing","volume":"35","author":"Zhou","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref5","article-title":"Coca: Contrastive captioners are image-text foundation models","author":"Yu","year":"2022"},{"key":"ref6","first-page":"6265","article-title":"Base layers: Simplifying training of large, sparse models","volume-title":"International Conference on Machine Learning","author":"Lewis"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03051-4"},{"key":"ref8","first-page":"17 555","article-title":"Hash layers for large sparse models","volume":"34","author":"Roller","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"1","key":"ref9","first-page":"5232","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"The Journal of Machine Learning Research"},{"key":"ref10","article-title":"Joint distribution optimal transportation for domain adaptation","volume":"30","author":"Courty","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2022.3166761"},{"key":"ref12","article-title":"Structured transforms across spaces with cost-regularized optimal transport","author":"Sebbouh","year":"2023"},{"key":"ref13","article-title":"Sinkhorn distances: Lightspeed computation of optimal transport","volume":"26","author":"Cuturi","year":"2013","journal-title":"Advances in neural information processing systems"},{"key":"ref14","article-title":"Gshard: Scaling giant models with conditional computation and automatic sharding","author":"Lepikhin","year":"2020"},{"key":"ref15","first-page":"12 449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"Baevski","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref16","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref17","article-title":"A review of sparse expert models in deep learning","author":"Fedus","year":"2022"},{"key":"ref18","first-page":"29 919","article-title":"Fast, differentiable and sparse top-k: a convex analysis perspective","volume-title":"International Conference on Machine Learning","author":"Sander"},{"key":"ref19","article-title":"Sparse upcycling: Training mixture-of-experts from dense checkpoints","author":"Komatsuzaki","year":"2022"},{"key":"ref20","first-page":"880","article-title":"Smooth and sparse optimal transport","volume-title":"International conference on artificial intelligence and statistics","author":"Blondel"},{"key":"ref21","article-title":"Improving and generalizing flow-based generative models with minibatch optimal transport","volume-title":"ICML Workshop on New Frontiers in Learning, Control, and Dynamical Systems","author":"Tong"},{"key":"ref22","first-page":"25 858","article-title":"Action matching: Learning stochastic dynamics from samples","volume-title":"International Conference on Machine Learning","author":"Neklyudov"},{"key":"ref23","first-page":"6808","article-title":"Wasserstein adversarial examples via projected sinkhorn iterations","volume-title":"International Conference on Machine Learning","author":"Wong"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3348657"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuroimage.2020.116847"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-023-01969-x"},{"key":"ref27","article-title":"Training mixture-of-experts: A focus on expert-token matching","author":"Vesaghati","journal-title":"The Second Tiny Papers Track at ICLR 2024."},{"key":"ref28","article-title":"Set-former is what you need for vision and language","volume-title":"Proceedings of the AAAI Conference on Artificial Intelligence","author":"Shamsolmoali"},{"key":"ref29","first-page":"4057","article-title":"Unified scaling laws for routed language models","volume-title":"International Conference on Machine Learning","author":"Clark"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref31","first-page":"933","article-title":"Language modeling with gated convolutional networks","volume-title":"International conference on machine learning","author":"Dauphin"},{"key":"ref32","article-title":"Gaussian error linear units (gelus)","author":"Hendrycks","year":"2016"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.97"},{"key":"ref35","article-title":"Superglue: A stickier benchmark for general-purpose language understanding systems","volume":"32","author":"Wang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"}],"event":{"name":"2024 International Joint Conference on Neural Networks (IJCNN)","location":"Yokohama, Japan","start":{"date-parts":[[2024,6,30]]},"end":{"date-parts":[[2024,7,5]]}},"container-title":["2024 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10649807\/10649898\/10650737.pdf?arnumber=10650737","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,26]],"date-time":"2024-09-26T17:40:30Z","timestamp":1727372430000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10650737\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,30]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/ijcnn60899.2024.10650737","relation":{},"subject":[],"published":{"date-parts":[[2024,6,30]]}}}