{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T10:04:15Z","timestamp":1775815455107,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":67,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,6,10]],"date-time":"2022-06-10T00:00:00Z","timestamp":1654819200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"the Tencent Wechat Rhino-Bird Focused Research Program"},{"name":"Shanghai Municipal Science and Technology Major Project","award":["2021SHZDZX0102"],"award-info":[{"award-number":["2021SHZDZX0102"]}]},{"name":"the Hong Kong RGC GRF Project","award":["16202218"],"award-info":[{"award-number":["16202218"]}]},{"name":"China NSFC","award":["61729201"],"award-info":[{"award-number":["61729201"]}]},{"name":"HKUST Global Strategic Partnership Fund","award":["2021 SJTU-HKUST"],"award-info":[{"award-number":["2021 SJTU-HKUST"]}]},{"name":"SJTU Global Strategic Partnership Fund","award":["2021 SJTU-HKUST"],"award-info":[{"award-number":["2021 SJTU-HKUST"]}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0101100"],"award-info":[{"award-number":["2018AAA0101100"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"HKUST-Webank joint research lab"},{"name":"Microsoft Research Asia Collaborative Research Grant"},{"name":"Didi-HKUST joint research lab"},{"name":"the Hong Kong RGC CRF Project","award":["C6030-18G, C1031-18G, C5026-18G"],"award-info":[{"award-number":["C6030-18G, C1031-18G, C5026-18G"]}]},{"name":"Hong Kong ITC ITF grants","award":["ITS\/044\/18FX, ITS\/470\/18FX"],"award-info":[{"award-number":["ITS\/044\/18FX, ITS\/470\/18FX"]}]},{"name":"HKUST-NAVER\/LINE AI Lab"},{"name":"the Hong Kong RGC Theme-based Project TRS","award":["T41-603\/20R"],"award-info":[{"award-number":["T41-603\/20R"]}]},{"name":"the Hong Kong RGC RIF Project","award":["R6020-19"],"award-info":[{"award-number":["R6020-19"]}]},{"name":"Guangdong Basic and Applied Basic Research Foundation","award":["2019B151530001"],"award-info":[{"award-number":["2019B151530001"]}]},{"name":"the Hong Kong RGC AOE Project","award":["AoE\/E-603\/18"],"award-info":[{"award-number":["AoE\/E-603\/18"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,6,10]]},"DOI":"10.1145\/3514221.3517836","type":"proceedings-article","created":{"date-parts":[[2022,6,12]],"date-time":"2022-06-12T02:33:49Z","timestamp":1655001229000},"page":"1271-1285","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":18,"title":["Camel: Managing Data for Efficient Stream Learning"],"prefix":"10.1145","author":[{"given":"Yiming","family":"Li","sequence":"first","affiliation":[{"name":"Hong Kong University of Science and Technology, Hong Kong, China"}]},{"given":"Yanyan","family":"Shen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"given":"Lei","family":"Chen","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology, Hong Kong, China"}]}],"member":"320","published-online":{"date-parts":[[2022,6,11]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Hazelwood","author":"Acun 
Bilge","year":"2021","unstructured":"Bilge Acun, Matthew Murphy, Xiaodong Wang, Jade Nie, Carole-Jean Wu, and Kim M. Hazelwood. 2021. Understanding Training Efficiency of Deep Learning Recommendation Models at Scale. In HPCA. 802--814."},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"crossref","unstructured":"Rahaf Aljundi Eugene Belilovsky Tinne Tuytelaars Laurent Charlin Massimo Caccia Min Lin and Lucas Page-Caccia. 2019 a. Online Continual Learning with Maximal Interfered Retrieval. In NeurIPS. 11849--11860.","DOI":"10.1109\/CVPR.2019.01151"},{"key":"e_1_3_2_2_3_1","unstructured":"Rahaf Aljundi Min Lin Baptiste Goujaud and Yoshua Bengio. 2019 b. Gradient based sample selection for online continual learning. In NeurIPS. 11816--11825."},{"key":"e_1_3_2_2_4_1","volume-title":"The Effectiveness of Memory Replay in Large Scale Continual Learning. CoRR","author":"Balaji Yogesh","year":"2020","unstructured":"Yogesh Balaji, Mehrdad Farajtabar, Dong Yin, Alex Mott, and Ang Li. 2020. The Effectiveness of Memory Replay in Large Scale Continual Learning. CoRR, Vol. abs\/2010.02418 (2020)."},{"key":"e_1_3_2_2_5_1","volume-title":"Annual Conference on Neural Information Processing Systems","author":"Bartlett Peter L.","year":"2017","unstructured":"Peter L. Bartlett, Dylan J. Foster, and Matus Telgarsky. 2017. Spectrally-normalized margin bounds for neural networks. In Annual Conference on Neural Information Processing Systems 2017. 6240--6249."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Yoshua Bengio J\u00e9r\u00f4me Louradour Ronan Collobert and Jason Weston. 2009. Curriculum learning. In ICML. 41--48.","DOI":"10.1145\/1553374.1553380"},{"key":"e_1_3_2_2_7_1","volume-title":"Semantic Redundancies in Image-Classification Datasets: The 10% You Don't Need. CoRR","author":"Birodkar Vighnesh","year":"2019","unstructured":"Vighnesh Birodkar, Hossein Mobahi, and Samy Bengio. 2019. Semantic Redundancies in Image-Classification Datasets: The 10% You Don't Need. CoRR, Vol. abs\/1901.11409 (2019)."},{"key":"e_1_3_2_2_8_1","unstructured":"Zal\u00e1 n Borsos Mojmir Mutny and Andreas Krause. 2020. Coresets via Bilevel Optimization for Continual Learning and Streaming. In NeurIPS."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2013.2272457"},{"key":"e_1_3_2_2_10_1","unstructured":"Pietro Buzzega Matteo Boschini Angelo Porrello Davide Abati and Simone Calderara. 2020. Dark Experience for General Continual Learning: a Strong Simple Baseline. In NeurIPS."},{"key":"e_1_3_2_2_11_1","unstructured":"Arslan Chaudhry Marc'Aurelio Ranzato Marcus Rohrbach and Mohamed Elhoseiny. 2019 a. Efficient Lifelong Learning with A-GEM. In ICLR."},{"key":"e_1_3_2_2_12_1","volume-title":"Philip H. S. Torr, and Marc'Aurelio Ranzato. 2019 b. Continual Learning with Tiny Episodic Memories. CoRR","author":"Chaudhry Arslan","year":"2019","unstructured":"Arslan Chaudhry, Marcus Rohrbach, Mohamed Elhoseiny, Thalaiyasingam Ajanthan, Puneet Kumar Dokania, Philip H. S. Torr, and Marc'Aurelio Ranzato. 2019 b. Continual Learning with Tiny Episodic Memories. CoRR, Vol. abs\/1902.10486 (2019)."},{"key":"e_1_3_2_2_13_1","volume-title":"Advances in Neural Information Processing Systems","author":"Courbariaux Matthieu","year":"2015","unstructured":"Matthieu Courbariaux, Yoshua Bengio, and Jean-Pierre David. 2015. BinaryConnect: Training Deep Neural Networks with binary weights during propagations. In Advances in Neural Information Processing Systems 2015. 
3123--3131."},{"key":"e_1_3_2_2_14_1","article-title":"Importance Sampling for Minibatches","volume":"19","author":"Csiba Dominik","year":"2018","unstructured":"Dominik Csiba and Peter Richt\u00e1 rik. 2018. Importance Sampling for Minibatches. J. Mach. Learn. Res., Vol. 19 (2018), 27:1--27:21.","journal-title":"J. Mach. Learn. Res."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.3390\/sym12101666"},{"key":"e_1_3_2_2_16_1","volume-title":"A continual learning survey: Defying forgetting in classification tasks. arXiv preprint arXiv:1909.08383","author":"Lange Matthias De","year":"2019","unstructured":"Matthias De Lange, Rahaf Aljundi, Marc Masana, Sarah Parisot, Xu Jia, Ales Leonardis, Gregory Slabaugh, and Tinne Tuytelaars. 2019. A continual learning survey: Defying forgetting in classification tasks. arXiv preprint arXiv:1909.08383 (2019)."},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2020.2976475"},{"key":"e_1_3_2_2_18_1","volume-title":"Facility location: applications and theory","author":"Drezner Zvi","unstructured":"Zvi Drezner and Horst W Hamacher. 2001. Facility location: applications and theory .Springer Science & Business Media."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"crossref","unstructured":"Dan Feldman and Michael Langberg. 2011. A unified framework for approximating and clustering data. In STOC.","DOI":"10.1145\/1993636.1993712"},{"key":"e_1_3_2_2_20_1","series-title":"SIAM J. Comput.","volume-title":"PCA, and Projective Clustering","author":"Feldman Dan","year":"2020","unstructured":"Dan Feldman, Melanie Schmidt, and Christian Sohler. 2020. Turning Big Data Into Tiny Data: Constant-Size Coresets for k-Means, PCA, and Projective Clustering. SIAM J. Comput. (2020)."},{"key":"e_1_3_2_2_21_1","volume-title":"Phillips","author":"Ghashami Mina","year":"2014","unstructured":"Mina Ghashami and Jeff M. Phillips. 2014. Relative Errors for Deterministic Low-Rank Matrix Approximations. In SODA. 707--717."},{"key":"e_1_3_2_2_22_1","article-title":"Revisiting the Nystrom Method for Improved Large-scale Machine Learning","volume":"17","author":"Gittens Alex","year":"2016","unstructured":"Alex Gittens and Michael W. Mahoney. 2016. Revisiting the Nystrom Method for Improved Large-scale Machine Learning. J. Mach. Learn. Res., Vol. 17 (2016), 117:1--117:65.","journal-title":"J. Mach. Learn. Res."},{"key":"e_1_3_2_2_23_1","unstructured":"Heitor Murilo Gomes Jesse Read and Albert Bifet. 2019. Streaming Random Patches for Evolving Data Stream Classification. In ICDM. 240--249."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"crossref","unstructured":"Michael Greenwald and Sanjeev Khanna. 2001. Space-Efficient Online Computation of Quantile Summaries. In SIGMOD.","DOI":"10.1145\/375663.375670"},{"key":"e_1_3_2_2_25_1","volume-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149","author":"Han Song","year":"2015","unstructured":"Song Han, Huizi Mao, and William J Dally. 2015. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015)."},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"crossref","unstructured":"Sariel Har-Peled and Soham Mazumdar. 2004. On coresets for k-means and k-median clustering. In STOC. 291--300.","DOI":"10.1145\/1007352.1007400"},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"crossref","unstructured":"Tyler L. Hayes Nathan D. Cahill and Christopher Kanan. 
2019. Memory Efficient Experience Replay for Streaming Learning. In ICRA. 9769--9776.","DOI":"10.1109\/ICRA.2019.8793982"},{"key":"e_1_3_2_2_28_1","unstructured":"Yanzhang He Tara N. Sainath and Rohit Prabhavalkar et al. 2019. Streaming End-to-end Speech Recognition for Mobile Devices. In ICASSP. 6381--6385."},{"key":"e_1_3_2_2_29_1","volume-title":"Online Learning: A Comprehensive Survey. CoRR","author":"Hoi Steven C. H.","year":"2018","unstructured":"Steven C. H. Hoi, Doyen Sahoo, Jing Lu, and Peilin Zhao. 2018. Online Learning: A Comprehensive Survey. CoRR, Vol. abs\/1802.02871 (2018)."},{"key":"e_1_3_2_2_30_1","volume-title":"MMM 2017, Reykjavik, Iceland, January 4--6, 2017, Proceedings, Part II (Lecture Notes in Computer Science","volume":"305","author":"Hu Jiagao","year":"2017","unstructured":"Jiagao Hu, Zhengxing Sun, Bo Li, Kewei Yang, and Dongyang Li. 2017. Online User Modeling for Interactive Streaming Image Classification. In MultiMedia Modeling - 23rd International Conference, MMM 2017, Reykjavik, Iceland, January 4--6, 2017, Proceedings, Part II (Lecture Notes in Computer Science, Vol. 10133). 293--305."},{"key":"e_1_3_2_2_31_1","unstructured":"Xinting Hu Kaihua Tang Chunyan Miao Xian-Sheng Hua and Hanwang Zhang. 2021. Distilling Causal Effect of Data in Class-Incremental Learning. In CVPR. 3957--3966."},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"crossref","unstructured":"Yanxiang Huang Bin Cui Wenyu Zhang Jie Jiang and Ying Xu. 2015. TencentRec: Real-time Stream Recommendation in Practice. In SIGMOD. 227--238.","DOI":"10.1145\/2723372.2742785"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1561\/9781680833690"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"crossref","unstructured":"Jiawei Jiang Fangcheng Fu Tong Yang and Bin Cui. 2018. SketchML: Accelerating Distributed Machine Learning with Data Sketches. In SIGMOD.","DOI":"10.1145\/3183713.3196894"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"crossref","unstructured":"Ashish Kapoor Simon Baker Sumit Basu and Eric Horvitz. 2012. Memory constrained face recognition. In CVPR. 2539--2546.","DOI":"10.1109\/CVPR.2012.6247971"},{"key":"e_1_3_2_2_36_1","first-page":"2530","article-title":"Not All Samples Are Created Equal: Deep Learning with Importance Sampling","volume":"80","author":"Katharopoulos Angelos","year":"2018","unstructured":"Angelos Katharopoulos and Fran\u00e7ois Fleuret. 2018. Not All Samples Are Created Equal: Deep Learning with Importance Sampling. In ICML, Vol. 80. 2530--2539.","journal-title":"ICML"},{"key":"e_1_3_2_2_37_1","unstructured":"Ronald Kemker and Christopher Kanan. 2018. FearNet: Brain-Inspired Model for Incremental Learning. In ICLR."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1982.1056489"},{"key":"e_1_3_2_2_39_1","volume-title":"1st Annual Conference on Robot Learning, CoRL","volume":"78","author":"Lomonaco Vincenzo","year":"2017","unstructured":"Vincenzo Lomonaco and Davide Maltoni. 2017. CORe50: a New Dataset and Benchmark for Continuous Object Recognition. In 1st Annual Conference on Robot Learning, CoRL, Vol. 78. 17--26."},{"key":"e_1_3_2_2_40_1","volume-title":"Gradient Episodic Memory for Continual Learning. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems","author":"Lopez-Paz David","year":"2017","unstructured":"David Lopez-Paz and Marc'Aurelio Ranzato. 2017. Gradient Episodic Memory for Continual Learning. 
In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017. 6467--6476."},{"key":"e_1_3_2_2_41_1","unstructured":"Mario Lucic Matthew Faulkner Andreas Krause and Dan Feldman. 2017. Training Gaussian Mixture Models at Scale via Coresets. J. Mach. Learn. Res. (2017)."},{"key":"e_1_3_2_2_42_1","unstructured":"Dionysis Manousakas Zuheng Xu Cecilia Mascolo and Trevor Campbell. 2020. Bayesian Pseudocoresets. In NeurIPS."},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.14778\/3352063.3352135"},{"key":"e_1_3_2_2_44_1","unstructured":"Avner May Jian Zhang Tri Dao and Christopher R\u00e9. 2019. On the Downstream Performance of Compressed Word Embeddings. In NeurIPS. 11782--11793."},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"crossref","unstructured":"Fei Mi and Boi Faltings. 2020. Memory Augmented Neural Model for Incremental Session-based Recommendation. In IJCAI. 2169--2176.","DOI":"10.24963\/ijcai.2020\/300"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"crossref","unstructured":"Michel Minoux. 1978. Accelerated greedy algorithms for maximizing submodular set functions. In Optimization techniques. 234--243.","DOI":"10.1007\/BFb0006528"},{"key":"e_1_3_2_2_47_1","volume-title":"Jan Vondr\u00e1k, and Andreas Krause","author":"Mirzasoleiman Baharan","year":"2015","unstructured":"Baharan Mirzasoleiman, Ashwinkumar Badanidiyuru, Amin Karbasi, Jan Vondr\u00e1k, and Andreas Krause. 2015. Lazier Than Lazy Greedy. In AAAI. 1812--1818."},{"key":"e_1_3_2_2_48_1","unstructured":"Baharan Mirzasoleiman Jeff A. Bilmes and Jure Leskovec. 2020. Coresets for Data-efficient Training of Machine Learning Models. In ICML."},{"key":"e_1_3_2_2_49_1","volume-title":"Woodruff","author":"Munteanu Alexander","year":"2018","unstructured":"Alexander Munteanu, Chris Schwiegelshohn, Christian Sohler, and David P. Woodruff. 2018. On Coresets for Logistic Regression. In NeurIPS."},{"key":"e_1_3_2_2_50_1","volume-title":"Vista: Optimized System for Declarative Feature Transfer from Deep CNNs at Scale. In SIGMOD. 1685--1700.","author":"Nakandala Supun","year":"2020","unstructured":"Supun Nakandala and Arun Kumar. 2020. Vista: Optimized System for Declarative Feature Transfer from Deep CNNs at Scale. In SIGMOD. 1685--1700."},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF01588971"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"crossref","unstructured":"Yongjoo Park Jingyi Qing Xiaoyang Shen and Barzan Mozafari. 2019. BlinkML: Efficient Maximum Likelihood Estimation with Probabilistic Guarantees. In SIGMOD.","DOI":"10.1145\/3299869.3300077"},{"key":"e_1_3_2_2_53_1","volume-title":"Lampert","author":"Rebuffi Sylvestre-Alvise","year":"2017","unstructured":"Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H. Lampert. 2017. iCaRL: Incremental Classifier and Representation Learning. In CVPR. 5533--5542."},{"key":"e_1_3_2_2_54_1","unstructured":"Matthew Riemer Ignacio Cases Robert Ajemian Miao Liu Irina Rish Yuhai Tu and Gerald Tesauro. 2019. Learning to Learn without Forgetting by Maximizing Transfer and Minimizing Interference. In ICLR."},{"key":"e_1_3_2_2_55_1","volume-title":"An overview of gradient descent optimization algorithms. arXiv preprint arXiv:1609.04747","author":"Ruder Sebastian","year":"2016","unstructured":"Sebastian Ruder. 2016. An overview of gradient descent optimization algorithms. 
arXiv preprint arXiv:1609.04747 (2016)."},{"key":"e_1_3_2_2_56_1","volume-title":"Hoi","author":"Sahoo Doyen","year":"2018","unstructured":"Doyen Sahoo, Quang Pham, Jing Lu, and Steven C. H. Hoi. 2018. Online Deep Learning: Learning Deep Neural Networks on the Fly. In IJCAI. 2660--2666."},{"key":"e_1_3_2_2_57_1","first-page":"4535","article-title":"Progress & Compress: A scalable framework for continual learning","volume":"80","author":"Schwarz Jonathan","year":"2018","unstructured":"Jonathan Schwarz, Wojciech Czarnecki, Jelena Luketina, Agnieszka Grabska-Barwinska, Yee Whye Teh, Razvan Pascanu, and Raia Hadsell. 2018. Progress & Compress: A scalable framework for continual learning. In ICML, Vol. 80. 4535--4544.","journal-title":"ICML"},{"key":"e_1_3_2_2_58_1","unstructured":"Ozan Sener and Silvio Savarese. 2018. Active Learning for Convolutional Neural Networks: A Core-Set Approach. In ICLR."},{"key":"e_1_3_2_2_59_1","volume-title":"Osman Hasan, and Muhammad Shafique.","author":"Shaheen Khadija","year":"2021","unstructured":"Khadija Shaheen, Muhammad Abdullah Hanif, Osman Hasan, and Muhammad Shafique. 2021. Continual Learning for Real-World Autonomous Systems: Algorithms, Challenges and Frameworks. CoRR, Vol. abs\/2105.12374 (2021)."},{"key":"e_1_3_2_2_60_1","unstructured":"Shai Shalev-Shwartz and Yoram Singer. 2007. Online learning: Theory algorithms and applications. (2007)."},{"key":"e_1_3_2_2_61_1","volume-title":"Federated Multi-Task Learning. CoRR","author":"Smith Virginia","year":"2017","unstructured":"Virginia Smith, Chao-Kai Chiang, Maziar Sanjabi, and Ameet Talwalkar. 2017. Federated Multi-Task Learning. CoRR, Vol. abs\/1705.10467 (2017)."},{"key":"e_1_3_2_2_62_1","first-page":"1","article-title":"A New Approximation Guarantee for Monotone Submodular Function Maximization via Discrete Convexity","volume":"107","author":"Soma Tasuku","year":"2018","unstructured":"Tasuku Soma and Yuichi Yoshida. 2018. A New Approximation Guarantee for Monotone Submodular Function Maximization via Discrete Convexity. In ICALP, Vol. 107. 99:1--99:14.","journal-title":"ICALP"},{"key":"e_1_3_2_2_63_1","unstructured":"Kai Sheng Tai Vatsal Sharan Peter Bailis and Gregory Valiant. 2018. Sketching Linear Classifiers over Data Streams. In SIGMOD."},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"crossref","unstructured":"Balajee Vamanan Gwendolyn Voskuilen and T. N. Vijaykumar. 2010. EffiCuts: optimizing packet classification for memory and throughput. In SIGCOMM. 207--218.","DOI":"10.1145\/1851275.1851208"},{"key":"e_1_3_2_2_65_1","volume-title":"A Practical Incremental Method to Train Deep CTR Models. CoRR","author":"Wang Yichao","year":"2020","unstructured":"Yichao Wang, Huifeng Guo, Ruiming Tang, Zhirong Liu, and Xiuqiang He. 2020. A Practical Incremental Method to Train Deep CTR Models. CoRR, Vol. abs\/2009.02147 (2020)."},{"key":"e_1_3_2_2_66_1","first-page":"4035","article-title":"ZipML: Training Linear Models with End-to-End Low Precision, and a Little Bit of Deep Learning","volume":"70","author":"Zhang Hantian","year":"2017","unstructured":"Hantian Zhang, Jerry Li, Kaan Kara, Dan Alistarh, Ji Liu, and Ce Zhang. 2017. ZipML: Training Linear Models with End-to-End Low Precision, and a Little Bit of Deep Learning. In ICML, Vol. 70. 4035--4043.","journal-title":"ICML"},{"key":"e_1_3_2_2_67_1","first-page":"1","article-title":"Stochastic Optimization with Importance Sampling for Regularized Loss Minimization","volume":"37","author":"Zhao Peilin","year":"2015","unstructured":"Peilin Zhao and Tong Zhang. 
2015. Stochastic Optimization with Importance Sampling for Regularized Loss Minimization. In ICML, Vol. 37. 1--9.","journal-title":"ICML"}],"event":{"name":"SIGMOD\/PODS '22: International Conference on Management of Data","location":"Philadelphia PA USA","acronym":"SIGMOD\/PODS '22","sponsor":["SIGMOD ACM Special Interest Group on Management of Data"]},"container-title":["Proceedings of the 2022 International Conference on Management of Data"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3514221.3517836","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3514221.3517836","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:30:35Z","timestamp":1750188635000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3514221.3517836"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,10]]},"references-count":67,"alternative-id":["10.1145\/3514221.3517836","10.1145\/3514221"],"URL":"https:\/\/doi.org\/10.1145\/3514221.3517836","relation":{},"subject":[],"published":{"date-parts":[[2022,6,10]]},"assertion":[{"value":"2022-06-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
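
The record above is a standard Crossref REST API work payload. A minimal Python sketch (assuming only the third-party `requests` package; the DOI and field names are taken directly from the record shown here) of how such a record can be fetched and a few bibliographic fields read out:

import requests

# DOI of the work record shown above.
DOI = "10.1145/3514221.3517836"

# The public Crossref REST API serves one work record per DOI.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()

# The payload mirrors the JSON above: the record proper sits under "message".
work = resp.json()["message"]

title = work["title"][0]          # "title" is a one-element list
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
venue = work["container-title"][0]

print(title)    # Camel: Managing Data for Efficient Stream Learning
print(authors)  # Yiming Li, Yanyan Shen, Lei Chen
print(venue)    # Proceedings of the 2022 International Conference on Management of Data
print(work["DOI"], work.get("page"), work.get("reference-count"))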