{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:34:19Z","timestamp":1772724859031,"version":"3.50.1"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100005801","name":"Facebook","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100005801","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100007225","name":"Ministry of Science and Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100007225","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100002465","name":"Delta","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100002465","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100009950","name":"Ministry of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100009950","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,2]]},"DOI":"10.1109\/hpca56546.2023.10071043","type":"proceedings-article","created":{"date-parts":[[2023,3,24]],"date-time":"2023-03-24T17:42:55Z","timestamp":1679679775000},"page":"1140-1152","source":"Crossref","is-referenced-by-count":4,"title":["Tensor Movement Orchestration in Multi-GPU Training Systems"],"prefix":"10.1109","author":[{"given":"Shao-Fu","family":"Lin","sequence":"first","affiliation":[{"name":"National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan"}]},{"given":"Yi-Jung","family":"Chen","sequence":"additional","affiliation":[{"name":"National Chi Nan University,Department of Computer Science and Information Engineering,Nantou,Taiwan"}]},{"given":"Hsiang-Yun","family":"Cheng","sequence":"additional","affiliation":[{"name":"Academia Sinica,Research Center for Information Technology Innovation,Taipei,Taiwan"}]},{"given":"Chia-Lin","family":"Yang","sequence":"additional","affiliation":[{"name":"National Taiwan University,Department of Computer Science and Information Engineering,Taipei,Taiwan"}]}],"member":"263","reference":[{"key":"ref1","article-title":"TensorFlow: A system for large-scale machine learning","volume-title":"Proceedings of the 12th USENIX Conference on Operating Systems Design and Implementation (OSDI)","author":"Abadi"},{"key":"ref2","article-title":"TensorFlow Eager: A multi-stage, python-embedded DSL for machine learning","author":"Agrawal","year":"2019"},{"key":"ref3","article-title":"Wav2vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS)","author":"Baevski"},{"key":"ref4","article-title":"Language models are few-shot learners","volume-title":"Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS)","author":"Brown"},{"key":"ref5","article-title":"Revisiting distributed synchronous SGD","author":"Chen","year":"2016"},{"key":"ref6","article-title":"Training deep nets with sublinear memory cost","author":"Chen","year":"2016"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/2901318.2901323"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2020.3039925"},{"key":"ref9","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018"},{"key":"ref10","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref11","article-title":"AI and memory wall","author":"Gholami","year":"2021"},{"key":"ref12","article-title":"User manual, dual LGA3647 sockets motherboard for Intel Xeon scalable family processors","year":"2019"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref14","article-title":"Gurobi optimizer reference manual","author":"Gurobi Optimization","year":"2022"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.90"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3373376.3378465"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2017.243"},{"key":"ref18","article-title":"GPipe: Efficient training of giant neural networks using pipeline parallelism","author":"Huang","year":"2018"},{"key":"ref19","article-title":"IBM PyTorch large model support","year":"2019"},{"key":"ref20","article-title":"IBM TensorFlow large model support V2","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA.2018.00070"},{"key":"ref22","article-title":"One weird trick for parallelizing convolutional neural networks","author":"Krizhevsky","year":"2014"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415530"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.14778\/3551793.3551828"},{"key":"ref25","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"Liu","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/SC.2016.62"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/BF01580665"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"ref30","article-title":"Memory-efficient pipeline-parallel DNN training","author":"Narayanan","year":"2020"},{"key":"ref31","article-title":"Deep learning recommendation model for personalization and recommendation systems","author":"Naumov","year":"2019"},{"key":"ref32","article-title":"NVIDIA Grace Hopper superchip","year":"2022"},{"key":"ref33","article-title":"NVIDIA H100 tensor core GPU","year":"2022"},{"key":"ref34","article-title":"CUDA","author":"NVIDIA","year":"2022"},{"key":"ref35","article-title":"PyTorch: An imperative style, high-performance deep learning library","author":"Paszke","year":"2019"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3373376.3378505"},{"key":"ref37","article-title":"PyTorch profiler","year":"2022"},{"key":"ref38","article-title":"Language models are unsupervised multitask learners","author":"Radford","year":"2019"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476205"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00057"},{"key":"ref42","article-title":"ZeRO-Offload: Democratizing billion-scale model training","volume-title":"Proceedings of USENIX Annual Technical Conference (ATC)","author":"Ren"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO.2016.7783721"},{"key":"ref44","article-title":"Mesh-TensorFlow: Deep learning for supercomputers","volume-title":"Proceedings of the 32nd International Conference on Neural Information Processing Systems (NIPS)","author":"Shazeer"},{"key":"ref45","article-title":"Megatron-LM: Training multi-billion parameter language models using model parallelism","author":"Shoeybi","year":"2019"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330756"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1145\/3178487.3178491"}],"event":{"name":"2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)","location":"Montreal, QC, Canada","start":{"date-parts":[[2023,2,25]]},"end":{"date-parts":[[2023,3,1]]}},"container-title":["2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10070856\/10070923\/10071043.pdf?arnumber=10071043","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T13:13:23Z","timestamp":1707830003000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10071043\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,2]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/hpca56546.2023.10071043","relation":{},"subject":[],"published":{"date-parts":[[2023,2]]}}}