{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T02:42:12Z","timestamp":1768444932179,"version":"3.49.0"},"reference-count":23,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2020,3,2]],"date-time":"2020-03-02T00:00:00Z","timestamp":1583107200000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,3,2]],"date-time":"2020-03-02T00:00:00Z","timestamp":1583107200000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"funder":[{"name":"Consejer\u00eda de Econom\u00eda e Infraestructura, Junta de Extremadura","award":["IB16118"],"award-info":[{"award-number":["IB16118"]}]},{"DOI":"10.13039\/501100003176","name":"Ministerio de Educaci\u00f3n, Cultura y Deporte","doi-asserted-by":"publisher","award":["FPU14\/02012"],"award-info":[{"award-number":["FPU14\/02012"]}],"id":[{"id":"10.13039\/501100003176","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003176","name":"Ministerio de Educaci\u00f3n, Cultura y Deporte","doi-asserted-by":"publisher","award":["FPU15\/02090"],"award-info":[{"award-number":["FPU15\/02090"]}],"id":[{"id":"10.13039\/501100003176","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Supercomput"],"published-print":{"date-parts":[[2020,12]]},"DOI":"10.1007\/s11227-020-03200-6","type":"journal-article","created":{"date-parts":[[2020,3,2]],"date-time":"2020-03-02T17:19:08Z","timestamp":1583169548000},"page":"9739-9754","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":19,"title":["Training deep neural networks: a static load balancing approach"],"prefix":"10.1007","volume":"76","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1858-9920","authenticated-orcid":false,"given":"Sergio","family":"Moreno-\u00c1lvarez","sequence":"first","affiliation":[]},{"given":"Juan M.","family":"Haut","sequence":"additional","affiliation":[]},{"given":"Mercedes E.","family":"Paoletti","sequence":"additional","affiliation":[]},{"given":"Juan A.","family":"Rico-Gallego","sequence":"additional","affiliation":[]},{"given":"Juan C.","family":"D\u00edaz-Mart\u00edn","sequence":"additional","affiliation":[]},{"given":"Javier","family":"Plaza","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,3,2]]},"reference":[{"issue":"10","key":"3200_CR1","doi-asserted-by":"publisher","first-page":"1033","DOI":"10.1109\/71.963416","volume":"12","author":"O Beaumont","year":"2001","unstructured":"Beaumont O, Boudet V, Rastello F, Robert Y (2001) Matrix multiplication on heterogeneous platforms. IEEE Trans Parallel Distrib Syst 12(10):1033\u20131051. https:\/\/doi.org\/10.1109\/71.963416","journal-title":"IEEE Trans Parallel Distrib Syst"},{"key":"3200_CR2","unstructured":"Ben-Nun T, Hoefler T (2018) Demystifying parallel and distributed deep learning: an in-depth concurrency analysis. arXiv:1802.09941"},{"key":"3200_CR3","doi-asserted-by":"crossref","unstructured":"Chen C, Weng Q, Wang W, Li B, Li B (2018) Fast distributed deep learning via worker-adaptive batch sizing. In: Proceedings of the ACM Symposium on Cloud Computing, SoCC \u201918. ACM, New York, USA, pp 521\u2013521","DOI":"10.1145\/3267809.3275463"},{"key":"3200_CR4","unstructured":"Chen J, Monga R, Bengio S, Jozefowicz R (2016) Revisiting distributed synchronous SGD. In: ICLR Workshop Track"},{"key":"3200_CR5","unstructured":"Chiu C, Sainath TN, Wu Y, Prabhavalkar R, Nguyen P, Chen Z, Kannan A, Weiss RJ, Rao K, Gonina K, Jaitly N, Li B, Chorowski J, Bacchiani M (2017) State-of-the-art speech recognition with sequence-to-sequence models. arXiv:1712.01769"},{"key":"3200_CR6","doi-asserted-by":"crossref","unstructured":"Clarke D, Zhong Z, Rychkov V, Lastovetsky A (2013) Fupermod: a framework for optimal data partitioning for parallel scientific applications on dedicated heterogeneous HPC platforms. In: Parallel Computing Technologies. Springer, Berlin, Heidelberg, pp 182\u2013196","DOI":"10.1007\/978-3-642-39958-9_16"},{"key":"3200_CR7","unstructured":"Dean J, Corrado GS, Monga R, Chen K, Devin M, Le QV, Mao MZ, Ranzato M, Senior A, Tucker P, Yang K, Ng AY (2012) Large scale distributed deep networks. In: NIPS, USA, pp 1223\u20131231"},{"key":"3200_CR8","unstructured":"Forum MPI (2015) MPI: a message-passing interface standard, version 3.1 , June 4, 2015. High-Performance Computing Center Stuttgart, University of Stuttgart"},{"key":"3200_CR9","doi-asserted-by":"crossref","unstructured":"Fox G, Qiu J, Jha S, Ekanayake S, Kamburugamuve S (2016) Big data, simulations and HPC convergence. In: Big Data Benchmarking. Springer, Cham, pp 3\u201317","DOI":"10.1007\/978-3-319-49748-8_1"},{"key":"3200_CR10","doi-asserted-by":"crossref","unstructured":"Gupta S, Zhang W, Wang F (2017) Model accuracy and runtime tradeoff in distributed deep learning: a systematic study. In: IJCAI, pp 4854\u20134858","DOI":"10.24963\/ijcai.2017\/681"},{"key":"3200_CR11","unstructured":"He K, Zhang X, Ren S, Sun J (2015) Deep residual learning for image recognition. arXiv:1512.03385"},{"issue":"2","key":"3200_CR12","doi-asserted-by":"publisher","first-page":"251","DOI":"10.1016\/0893-6080(91)90009-T","volume":"4","author":"K Hornik","year":"1991","unstructured":"Hornik K (1991) Approximation capabilities of multilayer feedforward networks. Neural Netw 4(2):251\u2013257","journal-title":"Neural Netw"},{"key":"3200_CR13","unstructured":"Huang Y, Cheng Y, Chen D, Lee H, Ngiam J, Le QV, Chen Z (2018) Gpipe: efficient training of giant neural networks using pipeline parallelism. arXiv:1811.06965"},{"issue":"3","key":"3200_CR14","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1109\/2.485891","volume":"29","author":"AK Jain","year":"1996","unstructured":"Jain AK, Mao J, Mohiuddin KM (1996) Artificial neural networks: a tutorial. Computer 29(3):31\u201344","journal-title":"Computer"},{"key":"3200_CR15","doi-asserted-by":"crossref","unstructured":"Jiang J, Cui B, Zhang C, Yu L (2017) Heterogeneity-aware distributed parameter servers. In: Proceedings of the 2017 ACM International Conference on Management of Data, SIGMOD \u201917. ACM, NY, USA, pp 463\u2013478","DOI":"10.1145\/3035918.3035933"},{"key":"3200_CR16","unstructured":"Krizhevsky A (2014) One weird trick for parallelizing convolutional neural networks. arXiv:1404.5997"},{"key":"3200_CR17","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2012) Imagenet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems 25. Curran Associates, Inc., pp 1097\u20131105"},{"key":"3200_CR18","unstructured":"Le QV, Ngiam J, Coates A, Lahiri A, Prochnow B, Ng AY (2011) On optimization methods for deep learning. In: Proceedings of the 28th International Conference on International Conference on Machine Learning, ICML\u201911. Omnipress, USA, pp 265\u2013272"},{"key":"3200_CR19","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun Y, Bengio Y, Hinton G (2015) Deep learning. Nature 521:436","journal-title":"Nature"},{"key":"3200_CR20","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1016\/j.isprsjprs.2019.09.006","volume":"158","author":"M Paoletti","year":"2019","unstructured":"Paoletti M, Haut J, Plaza J, Plaza A (2019) Deep learning classifiers for hyperspectral imaging: a review. ISPRS J Photogramm Remote Sens 158:279\u2013317","journal-title":"ISPRS J Photogramm Remote Sens"},{"issue":"3","key":"3200_CR21","doi-asserted-by":"publisher","first-page":"1654","DOI":"10.1007\/s11227-018-2724-8","volume":"75","author":"JA Rico-Gallego","year":"2019","unstructured":"Rico-Gallego JA, D\u00edaz-Mart\u00edn JC, Calvo-Jurado C, Moreno-\u00c1lvarez S, Garc\u00eda-Zapata JL (2019) Analytical communication performance models as a metric in the partitioning of data-parallel kernels on heterogeneous platforms. J Supercomput 75(3):1654\u20131669","journal-title":"J Supercomput"},{"key":"3200_CR22","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1016\/j.neunet.2014.09.003","volume":"61","author":"J Schmidhuber","year":"2015","unstructured":"Schmidhuber J (2015) Deep learning in neural networks: an overview. Neural Netw 61:85\u2013117","journal-title":"Neural Netw"},{"key":"3200_CR23","unstructured":"Sergeev A, Balso MD (2018) Horovod: fast and easy distributed deep learning in TensorFlow. arXiv:1802.05799"}],"container-title":["The Journal of Supercomputing"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-020-03200-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s11227-020-03200-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-020-03200-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,3,2]],"date-time":"2021-03-02T00:57:58Z","timestamp":1614646678000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s11227-020-03200-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,3,2]]},"references-count":23,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2020,12]]}},"alternative-id":["3200"],"URL":"https:\/\/doi.org\/10.1007\/s11227-020-03200-6","relation":{},"ISSN":["0920-8542","1573-0484"],"issn-type":[{"value":"0920-8542","type":"print"},{"value":"1573-0484","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,3,2]]},"assertion":[{"value":"2 March 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}