{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:39:58Z","timestamp":1772908798858,"version":"3.50.1"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783031195679","type":"print"},{"value":"9783031195686","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-19568-6_5","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T09:01:55Z","timestamp":1696064515000},"page":"121-172","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Efficient Hardware Acceleration of Emerging Neural Networks for Embedded Machine Learning: An Industry Perspective"],"prefix":"10.1007","author":[{"given":"Arnab","family":"Raha","sequence":"first","affiliation":[]},{"given":"Raymond","family":"Sung","sequence":"additional","affiliation":[]},{"given":"Soumendu","family":"Ghosh","sequence":"additional","affiliation":[]},{"given":"Praveen Kumar","family":"Gupta","sequence":"additional","affiliation":[]},{"given":"Deepak A.","family":"Mathaikutty","sequence":"additional","affiliation":[]},{"given":"Umer 
I.","family":"Cheema","sequence":"additional","affiliation":[]},{"given":"Kevin","family":"Hyland","sequence":"additional","affiliation":[]},{"given":"Cormac","family":"Brick","sequence":"additional","affiliation":[]},{"given":"Vijay","family":"Raghunathan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Raha, A., Kim, S.K., Mathaikutty, D.A., Venkataramanan, G., Mohapatra, D., Sung, R., Brick, C., Chinya, G.N.: Design considerations for edge neural network accelerators: An industry perspective. In: 34th International Conference on VLSI Design and 20th International Conference on Embedded Systems, pp. 328\u2013333 (2021)","DOI":"10.1109\/VLSID51830.2021.00061"},{"key":"5_CR2","doi-asserted-by":"crossref","unstructured":"Raha, A., Ghosh, S., Mohapatra, D., Mathaikutty, D.A., Sung, R., Brick, C., Raghunathan, V.: Special session: Approximate TinyML systems: Full system approximations for extreme energy-efficiency in intelligent edge devices. In: IEEE 39th International Conference on Computer Design (ICCD), pp. 13\u201316 (2021)","DOI":"10.1109\/ICCD53106.2021.00015"},{"key":"5_CR3","doi-asserted-by":"publisher","first-page":"2295","DOI":"10.1109\/JPROC.2017.2761740","volume":"105","author":"V Sze","year":"2017","unstructured":"Sze, V., Chen, Y.H., Yang, T.-J., Emer, J.S.: Efficient processing of deep neural networks: A tutorial and survey. Proc. IEEE 105, 2295\u20132329 (2017)","journal-title":"Proc. IEEE"},{"key":"5_CR4","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1109\/MM.2020.2985963","volume":"40","author":"H Kwon","year":"2020","unstructured":"Kwon, H., Chatarasi, P., Sarkar, V., Krishna, T., Pellauer, M., Parashar, A.: Maestro: A data-centric approach to understand reuse, performance, and hardware cost of DNN mappings. 
IEEE Micro 40, 20\u201329 (2020)","journal-title":"IEEE Micro"},{"key":"5_CR5","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1109\/MM.2021.3058217","volume":"41","author":"T Norrie","year":"2021","unstructured":"Norrie, T., Patil, N., Yoon, D.H., Kurian, G., Li, S., Laudon, J., Young, C., Jouppi, N.P., Patterson, D.A.: The design process for Google\u2019s training chips: Tpuv2 and tpuv3. IEEE Micro 41, 56\u201363 (2021)","journal-title":"IEEE Micro"},{"key":"5_CR6","doi-asserted-by":"crossref","unstructured":"Jang, J.-W., Lee, S., Kim, D., Park, H., Ardestani, A.S., Choi, Y., Kim, C., Kim, Y., Yu, H., et al.: Sparsity-aware and re-configurable NPU architecture for Samsung flagship mobile soc. In: ACM\/IEEE 48th Annual International Symposium on Computer Architecture (ISCA), pp. 15\u201328, (2021)","DOI":"10.1109\/ISCA52012.2021.00011"},{"issue":"6","key":"5_CR7","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. Commun. ACM 60, 6, 84\u201390 (2017). https:\/\/doi.org\/10.1145\/3065386","journal-title":"Commun. ACM"},{"key":"5_CR8","unstructured":"Zhao, Y., Wang, G., Tang, C., Luo, C., Zeng, W., Zha, Z.-J.: A battle of network structures: An empirical study of CNN, transformer, and MLP (2021). arXiv"},{"key":"5_CR9","unstructured":"Meta AI. The latest in machine learning \u2014 papers with code. https:\/\/paperswithcode.com\/"},{"key":"5_CR10","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, vol. 27 (2014)"},{"key":"5_CR11","unstructured":"Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need (2017). 
arXiv"},{"key":"5_CR12","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3505244","volume":"54","author":"S Khan","year":"2021","unstructured":"Khan, S., Naseer, M., Hayat, M., Zamir, S.W., Khan, F.S., Shah, M.: Transformers in vision: A survey. ACM Comput. Surv. 54, 1\u201341 (2021)","journal-title":"ACM Comput. Surv."},{"key":"5_CR13","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale (2021). arXiv"},{"key":"5_CR14","doi-asserted-by":"crossref","unstructured":"Lin, T., Wang, Y., Liu, X., Qiu, X.: A survey of transformers (2021). arXiv","DOI":"10.1016\/j.aiopen.2022.10.001"},{"key":"5_CR15","first-page":"24261","volume":"34","author":"IO Tolstikhin","year":"2021","unstructured":"Tolstikhin, I.O., Houlsby, N., Kolesnikov, A., Beyer, L., Zhai, X., Unterthiner, T., Yung, J., Steiner, A., Keysers, D., Uszkoreit, J., et al.: MLP-mixer: An all-MLP architecture for vision. Adv. Neural Inf. Process. Syst. 34, 24261\u201324272 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"1","key":"5_CR16","doi-asserted-by":"publisher","first-page":"141","DOI":"10.3390\/electronics11010141","volume":"11","author":"H Ko","year":"2022","unstructured":"Ko, H., Lee, S., Park, Y., Choi, A.: A survey of recommendation systems: Recommendation models, techniques, and application fields. Electronics 11(1), 141 (2022)","journal-title":"Electronics"},{"key":"5_CR17","unstructured":"Wu, S., Sun, F., Zhang, W., Cui, B.: Graph neural networks in recommender systems: A survey (2020). arXiv"},{"issue":"1","key":"5_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3158369","volume":"52","author":"S Zhang","year":"2019","unstructured":"Zhang, S., Yao, L., Sun, A., Tay, Y.: Deep learning based recommender system: A survey and new perspectives. 
ACM Comput. Surv. 52(1), 1\u201338 (2019)","journal-title":"ACM Comput. Surv."},{"key":"5_CR19","doi-asserted-by":"crossref","unstructured":"Dong, G., Tang, M., Wang, Z., Gao, J., Guo, S., Cai, L., Gutierrez, R., Campbell, B., Barnes, L.E., Boukhechba, M.: Graph neural networks in IoT: A survey. ACM Trans. Sensor Netw. (2022)","DOI":"10.1145\/3565973"},{"issue":"9","key":"5_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3477141","volume":"54","author":"S Abadal","year":"2021","unstructured":"Abadal, S., Jain, A., Guirado, R., L\u00f3pez-Alonso, J., Alarc\u00f3n, E.: Computing graph neural networks: A survey from algorithms to accelerators. ACM Comput. Surv. 54(9), 1\u201338 (2021)","journal-title":"ACM Comput. Surv."},{"key":"5_CR21","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.aiopen.2021.01.001","volume":"1","author":"J Zhou","year":"2020","unstructured":"Zhou, J., Cui, G., Hu, S., Zhang, Z., Yang, C., Liu, Z., Wang, L., Li, C., Sun, M.: Graph neural networks: A review of methods and applications. AI Open 1, 57\u201381 (2020)","journal-title":"AI Open"},{"key":"5_CR22","unstructured":"NVDLA Open Source Project - LUT programming. http:\/\/nvdla.org\/hw\/v1\/ias\/lut-programming.html"},{"issue":"1","key":"5_CR23","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1109\/JSSC.2016.2616357","volume":"52","author":"Y-H Chen","year":"2017","unstructured":"Chen, Y.-H., Krishna, T., Emer, J.S., Sze, V.: Eyeriss: An energy-efficient reconfigurable accelerator for deep convolutional neural networks. IEEE J. Solid-State Circuits 52(1), 127\u2013138 (2017)","journal-title":"IEEE J. Solid-State Circuits"},{"issue":"2","key":"5_CR24","doi-asserted-by":"publisher","first-page":"292","DOI":"10.1109\/JETCAS.2019.2910232","volume":"9","author":"Y-H Chen","year":"2019","unstructured":"Chen, Y.-H., Yang, T.J., Emer, J., Sze, V.: Eyeriss v2: A flexible accelerator for emerging deep neural networks on mobile devices. IEEE J. Emerg. 
Sel. Topics Circuits Syst. 9(2), 292\u2013308 (2019)","journal-title":"IEEE J. Emerg. Sel. Topics Circuits Syst."},{"key":"5_CR25","doi-asserted-by":"crossref","unstructured":"Lin, C.-H., Cheng, C.-C., Tsai, Y.-M., Hung, S.-J., Kuo, Y.-T., Wang, P.H., Tsung, P.-K., Hsu, J.-Y., Lai, W.-C., et al.: 7.1 a 3.4-to-13.3tops\/w 3.6tops dual-core deep-learning accelerator for versatile AI applications in 7nm 5g smartphone soc. In: IEEE International Solid-State Circuits Conference-(ISSCC), pp. 134\u2013136 (2020)","DOI":"10.1109\/ISSCC19947.2020.9063111"},{"issue":"2","key":"5_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3140659.3080246","volume":"45","author":"NP Jouppi","year":"2017","unstructured":"Jouppi, N.P., Young, C., Patil, N., Patterson, D., Agrawal, G., Bajwa, R., Bates, S., Bhatia, S., Boden, N., Borchers, A., et al.: In-datacenter performance analysis of a tensor processing unit. SIGARCH Comput. Archit. News 45(2), 1\u201312 (2017)","journal-title":"SIGARCH Comput. Archit. News"},{"key":"5_CR27","doi-asserted-by":"crossref","unstructured":"Qin, E., Samajdar, A., Kwon, H., Nadella, V., Srinivasan, S.M., Das, D., Kaul, B., Krishna, T.: Sigma: A sparse and irregular GEMM accelerator with flexible interconnects for DNN training. In: IEEE International Symposium on High Performance Computer Architecture (HPCA), pp. 58\u201370 (2020)","DOI":"10.1109\/HPCA47549.2020.00015"},{"key":"5_CR28","unstructured":"NVIDIA. Nvidia ampere architecture (2022). https:\/\/www.nvidia.com\/en-us\/data-center\/ampere-architecture\/"},{"key":"5_CR29","doi-asserted-by":"crossref","unstructured":"Parashar, A., Rhu, M., Mukkara, A., Puglielli, A., Venkatesan, R., Khailany, B., Emer, J., Keckler, S.W., Dally, W.J.: Scnn: An accelerator for compressed-sparse convolutional neural networks. In: Proceedings of the 44th Annual International Symposium on Computer Architecture, pp. 
27\u201340 (2017)","DOI":"10.1145\/3079856.3080254"},{"key":"5_CR30","doi-asserted-by":"crossref","unstructured":"Rhu, M., O\u2019Connor, M., Chatterjee, N., Pool, J., Kwon, Y., Keckler, S.W.: Compressing DMA engine: Leveraging activation sparsity for training deep neural networks. In: IEEE International Symposium on High Performance Computer Architecture (HPCA), pp. 78\u201391 (2018)","DOI":"10.1109\/HPCA.2018.00017"},{"key":"5_CR31","unstructured":"Intel\u24c7 Movidius\u2122 Myriad\u2122 X Vision Processing Unit (VPU). https:\/\/www.intel.com\/content\/www\/us\/en\/products\/details\/processors\/movidius-vpu\/movidius-myriad-x.html"},{"key":"5_CR32","first-page":"2198","volume":"2","author":"B Lee","year":"2003","unstructured":"Lee, B., Burgess, N.: Some results on Taylor-series function approximation on FPGA. In: The Thirty-Seventh Asilomar Conference on Signals, Systems Computers, vol. 2, pp. 2198\u20132202 (2003)","journal-title":"In: The Thirty-Seventh Asilomar Conference on Signals, Systems Computers"},{"key":"5_CR33","doi-asserted-by":"crossref","unstructured":"Lin, C.-W., Wang, J.-S.: A digital circuit design of hyperbolic tangent sigmoid function for neural networks. In: 2008 IEEE International Symposium on Circuits and Systems (ISCAS), pp. 856\u2013859 (2008)","DOI":"10.1109\/ISCAS.2008.4541553"},{"key":"5_CR34","first-page":"1070","volume":"1","author":"K Leboeuf","year":"2008","unstructured":"Leboeuf, K., Namin, A.H., Muscedere, R., Wu, H., Ahmadi, M.: High speed VLSI implementation of the hyperbolic tangent sigmoid function. In: Third International Conference on Convergence and Hybrid Information Technology, vol. 1, pp. 
1070\u20131073 (2008)","journal-title":"In: Third International Conference on Convergence and Hybrid Information Technology"},{"issue":"1","key":"5_CR35","doi-asserted-by":"publisher","first-page":"39","DOI":"10.1109\/TVLSI.2012.2232321","volume":"22","author":"B Zamanlooy","year":"2014","unstructured":"Zamanlooy, B., Mirhassani, M.: Efficient VLSI implementation of neural networks with hyperbolic tangent activation function. IEEE Trans. Very Large Scale Integr. Syst. 22(1), 39\u201348 (2014)","journal-title":"IEEE Trans. Very Large Scale Integr. Syst."},{"key":"5_CR36","doi-asserted-by":"crossref","unstructured":"Ioannou, Y.A., Robertson, D.P., Cipolla, R., Criminisi, A.: Deep roots: Improving CNN efficiency with hierarchical filter groups. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5977\u20135986 (2017)","DOI":"10.1109\/CVPR.2017.633"},{"key":"5_CR37","unstructured":"Sun, K., Li, M., Liu, D., Wang, J.: Igcv3: Interleaved low-rank group convolutions for efficient deep neural networks. In: BMVC (2018)"},{"key":"5_CR38","unstructured":"Dumoulin, V., Visin, F.: A guide to convolution arithmetic for deep learning (2016). arXiv"},{"key":"5_CR39","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. In: NAACL (2019)"},{"key":"5_CR40","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI Blog 1, 9 (2019)","journal-title":"OpenAI Blog"},{"key":"5_CR41","unstructured":"Brown, T.B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., et al.: Language models are few-shot learners (2020). 
arXiv"},{"key":"5_CR42","unstructured":"Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., Liu, P.J.: Exploring the limits of transfer learning with a unified text-to-text transformer (2019). arXiv"},{"key":"5_CR43","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 9992\u201310002 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"5_CR44","doi-asserted-by":"crossref","unstructured":"Wang, H., Zhang, Z., Han, S.: SpAtten: Efficient sparse attention architecture with cascade token and head pruning. In: IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 97\u2013110 (2021)","DOI":"10.1109\/HPCA51647.2021.00018"},{"key":"5_CR45","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: \u201cDeep Residual Learning for Image Recognition,\u201d 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"5_CR46","unstructured":"Hamilton, W.L., Ying, R., Leskovec, J.: Inductive representation learning on large graphs (2017). arXiv"},{"key":"5_CR47","unstructured":"Veli\u010dkovi\u0107, P., Cucurull, G., Casanova, A., Romero, A., Li\u00f2, P., Bengio, Y.: Graph attention networks (2018). arXiv"},{"key":"5_CR48","doi-asserted-by":"crossref","unstructured":"Yan, M., Deng, L., Hu, X., Liang, L., Feng, Y., Ye, X., Zhang, Z., Fan, D., Xie, Y.: HyGCN: A GCN accelerator with hybrid architecture (2020). arXiv","DOI":"10.1109\/HPCA47549.2020.00012"},{"key":"5_CR49","doi-asserted-by":"crossref","unstructured":"Stevens, J.R., Das, D., Avancha, S., Kaul, B., Raghunathan, A.: GNNerator: A hardware\/software framework for accelerating graph neural networks (2021). 
arXiv","DOI":"10.1109\/DAC18074.2021.9586122"},{"key":"5_CR50","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s (2022). arXiv","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"5_CR51","unstructured":"Susskind, Z., Arden, B., John, L.K., Stockton, P., John, E.B.: Neuro-symbolic AI: An emerging class of AI workloads and their characterization (2021). arXiv"},{"issue":"2","key":"5_CR52","doi-asserted-by":"publisher","first-page":"869","DOI":"10.1109\/COMST.2020.2970550","volume":"22","author":"X Wang","year":"2020","unstructured":"Wang, X., Han, Y., Leung, V.C., Niyato, D., Yan, X., Chen, X.: Convergence of edge computing and deep learning: A comprehensive survey. IEEE Commun. Surv. Tutor. 22(2), 869\u2013904 (2020)","journal-title":"IEEE Commun. Surv. Tutor."},{"issue":"5s","key":"5_CR53","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3126531","volume":"16","author":"A Raha","year":"2017","unstructured":"Raha, A., Raghunathan, V.: qLUT: Input-aware quantized table lookup for energy-efficient approximate accelerators. ACM Trans. Embed. Comput. Syst. 16(5s), 1\u201323 (2017)","journal-title":"ACM Trans. Embed. Comput. Syst."},{"key":"5_CR54","unstructured":"Salvator, D., Wu, H., Kulkarni, M., Emmart, N.: Nvidia technical blog: Int4 precision for AI inference (2019). https:\/\/developer.nvidia.com\/blog\/int4-for-ai-inference\/"},{"key":"5_CR55","unstructured":"Choi, J., Venkataramani, S.: Highly accurate deep learning inference with 2-bit precision (2019). https:\/\/www.ibm.com\/blogs\/research\/2019\/04\/2-bit-precision\/"},{"key":"5_CR56","doi-asserted-by":"crossref","unstructured":"Ghosh, S.K., Raha, A., Raghunathan, V.: Approximate inference systems (axis): end-to-end approximations for energy-efficient inference at the edge. In: Proceedings of the ACM\/IEEE International Symposium on Low Power Electronics and Design, pp. 
7\u201312 (2020)","DOI":"10.1145\/3370748.3406575"},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Bavikadi, S., Sutradhar, P.R., Khasawneh, K.N., Ganguly, A., Dinakarrao, S.M.P.: A review of in-memory computing architectures for machine learning applications. In: Proceedings of the Great Lakes Symposium on VLSI, pp. 89\u201394 (2020)","DOI":"10.1145\/3386263.3407649"},{"issue":"3","key":"5_CR58","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1109\/MCAS.2021.3092533","volume":"21","author":"S Yu","year":"2021","unstructured":"Yu, S., Jiang, H., Huang, S., Peng, X., Lu, A.: Compute-in-memory chips for deep learning: recent trends and prospects. IEEE Circuits Syst. Mag. 21(3), 31\u201356 (2021)","journal-title":"IEEE Circuits Syst. Mag."},{"issue":"10","key":"5_CR59","first-page":"1415","volume":"65","author":"L Bai","year":"2018","unstructured":"Bai, L., Zhao, Y., Huang, X.: A CNN accelerator on FPGA using depthwise separable convolution. IEEE Trans. Circuits Syst. II: Express Briefs 65(10), 1415\u20131419 (2018)","journal-title":"IEEE Trans. Circuits Syst. II: Express Briefs"},{"key":"5_CR60","doi-asserted-by":"crossref","unstructured":"Lu, S., Wang, M., Liang, S., Lin, J., Wang, Z.: Hardware accelerator for multi-head attention and position-wise feed-forward in the transformer. In: IEEE 33rd International System-on-Chip Conference (SOCC), pp. 84\u201389. IEEE (2020)","DOI":"10.1109\/SOCC49529.2020.9524802"},{"key":"5_CR61","unstructured":"Kiningham, K., Re, C., Levis, P.: Grip: A graph neural network accelerator architecture (2020). 
arXiv"}],"container-title":["Embedded Machine Learning for Cyber-Physical, IoT, and Edge Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19568-6_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T18:22:37Z","timestamp":1730226157000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19568-6_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"ISBN":["9783031195679","9783031195686"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19568-6_5","relation":{},"subject":[],"published":{"date-parts":[[2023,10,1]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}}]}}