{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:56:05Z","timestamp":1772726165956,"version":"3.50.1"},"reference-count":48,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,10,25]],"date-time":"2021-10-25T00:00:00Z","timestamp":1635120000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,10,25]],"date-time":"2021-10-25T00:00:00Z","timestamp":1635120000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,10,25]]},"DOI":"10.23919\/cnsm52442.2021.9615524","type":"proceedings-article","created":{"date-parts":[[2021,12,2]],"date-time":"2021-12-02T20:30:34Z","timestamp":1638477034000},"page":"207-215","source":"Crossref","is-referenced-by-count":4,"title":["Network Traffic Characteristics of Machine Learning Frameworks Under the Microscope"],"prefix":"10.23919","author":[{"given":"Johannes","family":"Zerwas","sequence":"first","affiliation":[]},{"given":"Kaan","family":"Aykurt","sequence":"additional","affiliation":[]},{"given":"Stefan","family":"Schmid","sequence":"additional","affiliation":[]},{"given":"Andreas","family":"Blenk","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Tictac: Accelerating distributed deep learning with communication scheduling","author":"hashemi","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref38","first-page":"iii-1337","article-title":"Deep learning with cots hpc systems","author":"coates","year":"0","journal-title":"Proc ICML"},{"key":"ref33","year":"0","journal-title":"Gloo"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30218-6_19"},{"key":"ref31","year":"0","journal-title":"Google's remote procedure 
call library"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CCGRID.2019.00064"},{"key":"ref37","first-page":"3043","article-title":"Asynchronous decentralized parallel stochastic gradient descent","author":"lian","year":"0","journal-title":"Proc ICML"},{"key":"ref36","article-title":"Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems","author":"chen","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref35","author":"chollet","year":"2015","journal-title":"Keras"},{"key":"ref34","year":"0","journal-title":"Nvidia nccl"},{"key":"ref10","article-title":"TensorFlow: Large-scale machine learning on heterogeneous systems","author":"abadi","year":"2015"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1147\/JRD.2019.2947013"},{"key":"ref11","year":"0","journal-title":"Apache SINGA"},{"key":"ref12","first-page":"8024","article-title":"Pytorch: An imperative style, high-performance deep learning library","author":"paszke","year":"0","journal-title":"Advances in NIPS 32"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"ref14","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","author":"lin","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3377454"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155282"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TCC.2020.3040312"},{"key":"ref18","article-title":"In-network aggregation for shared machine learning clusters","author":"gebara","year":"0","journal-title":"Proc MLSys 2021"},{"key":"ref19","first-page":"741","article-title":"ATP: In-network aggregation for multi-tenant learning","author":"lao","year":"0","journal-title":"Proc USENIX 
NSDI"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2019.8737595"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2842103"},{"key":"ref27","first-page":"947","article-title":"Analysis of large-scale multi-tenant gpu clusters for dnn training workloads","author":"jeon","year":"0","journal-title":"Proc USENIX ATC"},{"key":"ref3","article-title":"What's New In Gartner's Hype Cycle For AI","author":"columbus","year":"2020","journal-title":"Forbes"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3405671.3405810"},{"key":"ref29","first-page":"1111","article-title":"A reliable effective terascale linear learning system","volume":"15","author":"agarwal","year":"2014","journal-title":"Journal of Machine Learning Research"},{"key":"ref5","first-page":"103","article-title":"Gpipe: Efficient training of giant neural networks using pipeline parallelism","volume":"32","author":"huang","year":"2019","journal-title":"Advances in NIPS"},{"key":"ref8","article-title":"Horovod: fast and easy distributed deep learning in tensorflow","author":"sergeev","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref7","first-page":"181","article-title":"Poseidon: An efficient communication architecture for distributed deep learning on GPU clusters","author":"zhang","year":"0","journal-title":"Proc USENIX ATC '17"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1186\/s13040-017-0154-4"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/2641190.2641198"},{"key":"ref9","first-page":"937","article-title":"Kungfu: Making training in distributed machine learning adaptive","author":"mai","year":"0","journal-title":"Proc of USENIX OSDI"},{"key":"ref20","article-title":"Scaling distributed machine learning with in-network aggregation","author":"sapio","year":"2019","journal-title":"ArXiv 
Preprint"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/JLT.2021.3120868"},{"key":"ref48","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"0","journal-title":"Proc ICML"},{"key":"ref21","author":"khani","year":"0","journal-title":"Terarack A tbps rack for machine learning training"},{"key":"ref47","author":"krizhevsky","year":"2009","journal-title":"Learning multiple layers of features from tiny images"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CCIS.2018.8691150"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3320060"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2020.01.004"},{"key":"ref41","first-page":"463","article-title":"A unified architecture for accelerating distributed DNN training in heterogeneous gpu\/cpu clusters","author":"jiang","year":"0","journal-title":"Proc USENIX OSDI"},{"key":"ref26","first-page":"1","article-title":"Expanding across time to deliver bandwidth efficiency and low latency","author":"mellette","year":"0","journal-title":"Proc USENIX NSDI"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3387514.3406221"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TNSE.2021.3068155"}],"event":{"name":"2021 17th International Conference on Network and Service Management (CNSM)","location":"Izmir, Turkey","start":{"date-parts":[[2021,10,25]]},"end":{"date-parts":[[2021,10,29]]}},"container-title":["2021 17th International Conference on Network and Service Management 
(CNSM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9615441\/9615442\/09615524.pdf?arnumber=9615524","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,3,21]],"date-time":"2022-03-21T20:53:34Z","timestamp":1647896014000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9615524\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10,25]]},"references-count":48,"URL":"https:\/\/doi.org\/10.23919\/cnsm52442.2021.9615524","relation":{},"subject":[],"published":{"date-parts":[[2021,10,25]]}}}