{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T10:21:50Z","timestamp":1742984510533,"version":"3.40.3"},"publisher-location":"Cham","reference-count":22,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030959524"},{"type":"electronic","value":"9783030959531"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-030-95953-1_9","type":"book-chapter","created":{"date-parts":[[2022,2,15]],"date-time":"2022-02-15T19:49:53Z","timestamp":1644954593000},"page":"119-133","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Optimized Code Generation for\u00a0Deep Neural Networks"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9751-7844","authenticated-orcid":false,"given":"Janaan","family":"Lake","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5052-8183","authenticated-orcid":false,"given":"Tharindu R.","family":"Patabandi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3058-7573","authenticated-orcid":false,"given":"Mary","family":"Hall","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,2,16]]},"reference":[{"unstructured":"Eigen (2019). http:\/\/eigen.tuxfamily.org\/","key":"9_CR1"},{"unstructured":"Intel math Kernal library for deep learning networks (2019). https:\/\/software.intel.com\/en-us\/articles\/intel-mkl-dnn-part-1-library-overview-and-installation","key":"9_CR2"},{"unstructured":"NVIDIA cuDNN (2019). https:\/\/developer.nvidia.com\/cudnn","key":"9_CR3"},{"unstructured":"Abadi, M., et al.: Tensorflow: large-scale machine learning on heterogeneous distributed systems. arXiv e-prints 1603.04467 (March 2016)","key":"9_CR4"},{"key":"9_CR5","volume-title":"Optimizing Compilers for Modern Architectures","author":"R Allen","year":"2002","unstructured":"Allen, R., Kennedy, K.: Optimizing Compilers for Modern Architectures. Academic Press, London (2002)"},{"unstructured":"Chen, T., et al.: MXNet: a flexible and efficient machine learning library for heterogeneous distributed systems. arXiv e-prints arXiv:1512.01274 (2015)","key":"9_CR6"},{"unstructured":"Chen, T., et al.: TVM: an automated end-to-end optimizing compiler for deep learning. arXiv e-prints arXiv:1802.04799 (2018)","key":"9_CR7"},{"unstructured":"Collobert, R., Kavukcuoglu, K., Farabet, C.: Torch7: a Matlab-like environment for machine learning. In: BigLearn, NIPS Workshop (2011)","key":"9_CR8"},{"doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016)","key":"9_CR9","DOI":"10.1109\/CVPR.2016.90"},{"unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. CoRR abs\/1502.03167 (2015)","key":"9_CR10"},{"doi-asserted-by":"publisher","unstructured":"Jia, Y., et al.: Caffe: convolutional architecture for fast feature embedding. In: Proceedings of the 22nd ACM International Conference on Multimedia, MM \u201914, pp. 675\u2013678. Association for Computing Machinery, New York (2014). https:\/\/doi.org\/10.1145\/2647868.2654889","key":"9_CR11","DOI":"10.1145\/2647868.2654889"},{"unstructured":"Lake, J.: Optimized code generation for deep learning networks using LATTE and SWIRL (2020). Unpublished bachelor\u2019s thesis","key":"9_CR12"},{"doi-asserted-by":"publisher","unstructured":"Ragan-Kelley, J., Barnes, C., Adams, A., Paris, S., Durand, F., Amarasinghe, S.: Halide: a language and compiler for optimizing parallelism, locality, and recomputation in image processing pipelines. In: Proceedings of the 34th ACM SIGPLAN Conference on Programming Language Design and Implementation, PLDI \u201913, pp. 519\u2013530. Association for Computing Machinery, New York (2013). https:\/\/doi.org\/10.1145\/2491956.2462176","key":"9_CR13","DOI":"10.1145\/2491956.2462176"},{"unstructured":"Rotem, N., et al.: Glow: graph lowering compiler techniques for neural networks. arXiv e-prints arXiv:1805.00907 (2018)","key":"9_CR14"},{"unstructured":"Schilling, F.: The effect of Batch Normalization on deep convolutional neural networks (Dissertation) (2016). http:\/\/urn.kb.se\/resolve?urn=urn:nbn:se:kth:diva-191222","key":"9_CR15"},{"doi-asserted-by":"publisher","unstructured":"Seide, F., Agarwal, A.: CNTK: Microsoft\u2019s open-source deep-learning toolkit. In: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD \u201916, p. 2135. Association for Computing Machinery, New York (2016). https:\/\/doi.org\/10.1145\/2939672.2945397","key":"9_CR16","DOI":"10.1145\/2939672.2945397"},{"unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. CoRR abs\/1409.1556 (2015)","key":"9_CR17"},{"doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1\u20139 (2015)","key":"9_CR18","DOI":"10.1109\/CVPR.2015.7298594"},{"unstructured":"Theano Development Team: Theano: A Python framework for fast computation of mathematical expressions. arXiv e-prints abs\/1605.02688 (2016)","key":"9_CR19"},{"issue":"6","key":"9_CR20","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1145\/2980983.2908105","volume":"51","author":"L Truong","year":"2016","unstructured":"Truong, L., et al.: Latte: a language, compiler, and runtime for elegant and efficient deep neural networks. SIGPLAN Not. 51(6), 209\u2013223 (2016)","journal-title":"SIGPLAN Not."},{"key":"9_CR21","doi-asserted-by":"publisher","first-page":"1275","DOI":"10.1177\/1094342019866247","volume":"33","author":"A Venkat","year":"2019","unstructured":"Venkat, A., Rusira, T., Barik, R., Hall, M.W., Truong, L.: SWIRL: high-performance many-core CPU code generation for deep neural networks. Int. J. High Perform. Comput. Appl. 33, 1275\u20131289 (2019)","journal-title":"Int. J. High Perform. Comput. Appl."},{"doi-asserted-by":"publisher","unstructured":"Xing, Y., Weng, J., Wang, Y., Sui, L., Shan, Y., Wang, Y.: An in-depth comparison of compilers for deep neural networks on hardware. In: 2019 IEEE International Conference on Embedded Software and Systems (ICESS), pp. 1\u20138 (2019). https:\/\/doi.org\/10.1109\/ICESS.2019.8782480","key":"9_CR22","DOI":"10.1109\/ICESS.2019.8782480"}],"container-title":["Lecture Notes in Computer Science","Languages and Compilers for Parallel Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-95953-1_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,2,15]],"date-time":"2022-02-15T19:55:21Z","timestamp":1644954921000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-95953-1_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783030959524","9783030959531"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-95953-1_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"16 February 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"LCPC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Languages and Compilers for Parallel Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 October 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16 October 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"lcpc2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"19","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"15","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"79% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}