{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:10:26Z","timestamp":1750219826702,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":31,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,8,24]],"date-time":"2023-08-24T00:00:00Z","timestamp":1692835200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"Institute of Information & Communications Technology Planning & Evaluation"},{"DOI":"10.13039\/100018058","name":"SNU-SK Hynix Solution Research Center","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100018058","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,8,24]]},"DOI":"10.1145\/3609510.3609811","type":"proceedings-article","created":{"date-parts":[[2023,7,31]],"date-time":"2023-07-31T16:08:44Z","timestamp":1690819724000},"page":"50-57","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Liquid"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-1304-3539","authenticated-orcid":false,"given":"Woohyeon","family":"Baek","sequence":"first","affiliation":[{"name":"Seoul National University"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8001-9451","authenticated-orcid":false,"given":"Jonghyun","family":"Bae","sequence":"additional","affiliation":[{"name":"Lawrence Berkeley National Laboratory and Seoul National University"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-6925-8824","authenticated-orcid":false,"given":"Donghyun","family":"Lee","sequence":"additional","affiliation":[{"name":"Seoul National 
University"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5930-774X","authenticated-orcid":false,"given":"Hyunwoong","family":"Bae","sequence":"additional","affiliation":[{"name":"Seoul National University"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1425-0971","authenticated-orcid":false,"given":"Yeonhong","family":"Park","sequence":"additional","affiliation":[{"name":"Seoul National University"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4266-4919","authenticated-orcid":false,"given":"Jae W.","family":"Lee","sequence":"additional","affiliation":[{"name":"Seoul National University"}]}],"member":"320","published-online":{"date-parts":[[2023,8,24]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"European Conference on Computer Vision (ECCV 22)","author":"Bae Jonghyun","year":"2022","unstructured":"Jonghyun Bae , Woohyeon Baek , Tae Jun Ham , and Jae W. Lee . 2022. L3: accelerator-friendly lossless image format for high-resolution, high-throughput dnn training . In European Conference on Computer Vision (ECCV 22) . Springer. European Computer Vision Association, Tel-Aviv, Israel , ( Oct. 2022 ), 171--188. Jonghyun Bae, Woohyeon Baek, Tae Jun Ham, and Jae W. Lee. 2022. L3: accelerator-friendly lossless image format for high-resolution, high-throughput dnn training. In European Conference on Computer Vision (ECCV 22). Springer. European Computer Vision Association, Tel-Aviv, Israel, (Oct. 2022), 171--188."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3337821.3337902"},{"key":"e_1_3_2_1_4_1","unstructured":"Project Fiddle. 2022. Coordinated data loader. https:\/\/github.com\/msr-fiddle\/CoorDL. (2022).  Project Fiddle. 2022. Coordinated data loader. https:\/\/github.com\/msr-fiddle\/CoorDL. (2022)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"e_1_3_2_1_6_1","unstructured":"NVIDIA. 2021. 
The NVIDIA data loading library (DALI). https:\/\/github.com\/NVIDIA\/DALI. (2021).  NVIDIA. 2021. The NVIDIA data loading library (DALI). https:\/\/github.com\/NVIDIA\/DALI. (2021)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2006.884573"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476181"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1002\/cpe.4283"},{"key":"e_1_3_2_1_10_1","unstructured":"Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2015. Deep residual learning for image recognition. arXiv preprint arXiv:1512.03385. (2015).  Kaiming He Xiangyu Zhang Shaoqing Ren and Jian Sun. 2015. Deep residual learning for image recognition. arXiv preprint arXiv:1512.03385. (2015)."},{"key":"e_1_3_2_1_11_1","unstructured":"Andrew Howard et al. 2019. Searching for mobilenetv3. arXiv preprint arXiv:1905.02244. (2019).  Andrew Howard et al. 2019. Searching for mobilenetv3. arXiv preprint arXiv:1905.02244. (2019)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/EDSSC.2008.4760652"},{"key":"e_1_3_2_1_13_1","unstructured":"Forrest N. Iandola Song Han Matthew W. Moskewicz Khalid Ashraf William J. Dally and Kurt Keutzer. 2016. Squeezenet: alexnet-level accuracy with 50x fewer parameters and &lt;0.5mb model size. arXiv preprint arXiv:1602.07360. (2016).  Forrest N. Iandola Song Han Matthew W. Moskewicz Khalid Ashraf William J. Dally and Kurt Keutzer. 2016. Squeezenet: alexnet-level accuracy with 50x fewer parameters and &lt;0.5mb model size. arXiv preprint arXiv:1602.07360. 
(2016)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00982"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/CCECE.2019.8861851"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICIIP.2013.6707617"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.14778\/3446095.3446100"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.14778\/3476311.3476374"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO50266.2020.00072"},{"key":"e_1_3_2_1_20_1","unstructured":"PyTorch. 2021. PyTorch. https:\/\/pytorch.org. (2021).  PyTorch. 2021. PyTorch. https:\/\/pytorch.org. (2021)."},{"key":"e_1_3_2_1_21_1","unstructured":"Apache Mesos. 2022. RecordIO Data Format. https:\/\/mesos.apache.org\/documentation\/latest\/recordio. (2022).  Apache Mesos. 2022. RecordIO Data Format. https:\/\/mesos.apache.org\/documentation\/latest\/recordio. (2022)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/Cluster48925.2021.00062"},{"key":"e_1_3_2_1_24_1","volume-title":"Proceedings of the 2016 45th International Conference on Parallel Processing. IEEE","author":"Sitaridi Evangelia","year":"2016","unstructured":"Evangelia Sitaridi , Rene Mueller , Tim Kaldewey , Guy Lohman , and Kenneth A. Ross . 2016. Massively-parallel lossless data decompression . In Proceedings of the 2016 45th International Conference on Parallel Processing. IEEE , Philadelphia, PA, USA, 242--247. doi: 10.1109\/ICPP. 2016 .35. 10.1109\/ICPP.2016.35 Evangelia Sitaridi, Rene Mueller, Tim Kaldewey, Guy Lohman, and Kenneth A. Ross. 2016. Massively-parallel lossless data decompression. In Proceedings of the 2016 45th International Conference on Parallel Processing. IEEE, Philadelphia, PA, USA, 242--247. doi: 10.1109\/ICPP.2016.35."},{"key":"e_1_3_2_1_25_1","unstructured":"TensorFlow. 
2022. TFRecord and tf.train.Example. https:\/\/www.tensorflow.org\/tutorials\/load_data\/tfrecord. (2022).  TensorFlow. 2022. TFRecord and tf.train.Example. https:\/\/www.tensorflow.org\/tutorials\/load_data\/tfrecord. (2022)."},{"volume-title":"Proceedings of the ACM International Conference on Parallel Architectures and Compilation Techniques. Association for Computing Machinery","author":"Jiannan","key":"e_1_3_2_1_26_1","unstructured":"Jiannan Tian et al. 2020. Cusz: an efficient gpu-based error-bounded lossy compression framework for scientific data . In Proceedings of the ACM International Conference on Parallel Architectures and Compilation Techniques. Association for Computing Machinery , New York, NY, USA, 3--15. isbn: 9781450380751. Jiannan Tian et al. 2020. Cusz: an efficient gpu-based error-bounded lossy compression framework for scientific data. In Proceedings of the ACM International Conference on Parallel Architectures and Compilation Techniques. Association for Computing Machinery, New York, NY, USA, 3--15. isbn: 9781450380751."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404397.3404472"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3225058.3225076"},{"key":"e_1_3_2_1_29_1","unstructured":"Ultralytics. 2021. Yolov5. https:\/\/github.com\/ultralytics\/yolov5\/. (2021).  Ultralytics. 2021. Yolov5. https:\/\/github.com\/ultralytics\/yolov5\/. (2021)."},{"key":"e_1_3_2_1_30_1","volume-title":"Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition. IEEE","author":"Zhang S.","year":"2017","unstructured":"S. Zhang , R. Benenson , and B. Schiele . 2017. Citypersons: a diverse dataset for pedestrian detection . In Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition. IEEE , Los Alamitos, CA, USA , ( July 2017 ), 4457--4465. S. Zhang, R. Benenson, and B. Schiele. 2017. Citypersons: a diverse dataset for pedestrian detection. 
In Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition. IEEE, Los Alamitos, CA, USA, (July 2017), 4457--4465."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00716"}],"event":{"name":"APSys '23: 14th ACM SIGOPS Asia-Pacific Workshop on Systems","sponsor":["SIGOPS ACM Special Interest Group on Operating Systems"],"location":"Seoul Republic of Korea","acronym":"APSys '23"},"container-title":["Proceedings of the 14th ACM SIGOPS Asia-Pacific Workshop on Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3609510.3609811","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3609510.3609811","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:46:25Z","timestamp":1750178785000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3609510.3609811"}},"subtitle":["Mix-and-Match Multiple Image Formats to Balance DNN Training Pipeline"],"short-title":[],"issued":{"date-parts":[[2023,8,24]]},"references-count":31,"alternative-id":["10.1145\/3609510.3609811","10.1145\/3609510"],"URL":"https:\/\/doi.org\/10.1145\/3609510.3609811","relation":{},"subject":[],"published":{"date-parts":[[2023,8,24]]},"assertion":[{"value":"2023-08-24","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}