{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T11:05:28Z","timestamp":1743073528868,"version":"3.40.3"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031697654"},{"type":"electronic","value":"9783031697661"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-69766-1_8","type":"book-chapter","created":{"date-parts":[[2024,8,25]],"date-time":"2024-08-25T19:02:05Z","timestamp":1724612525000},"page":"107-120","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Watt: A Write-Optimized RRAM-Based Accelerator for\u00a0Attention"],"prefix":"10.1007","author":[{"given":"Xuan","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Zhuoran","family":"Song","sequence":"additional","affiliation":[]},{"given":"Xing","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhezhi","family":"He","sequence":"additional","affiliation":[]},{"given":"Naifeng","family":"Jing","sequence":"additional","affiliation":[]},{"given":"Li","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Xiaoyao","family":"Liang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,26]]},"reference":[{"key":"8_CR1","unstructured":"https:\/\/www.synopsys.com\/community\/university-program\/teaching-resources.html"},{"key":"8_CR2","doi-asserted-by":"crossref","unstructured":"Arnab, A., et al.: ViViT: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"issue":"2","key":"8_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3085572","volume":"14","author":"R Balasubramonian","year":"2017","unstructured":"Balasubramonian, R., et al.: Cacti 7: New tools for interconnect exploration in innovative off-chip memories. ACM Trans. Architect. Code Optim. (TACO) 14(2), 1\u201325 (2017)","journal-title":"ACM Trans. Architect. Code Optim. (TACO)"},{"issue":"12","key":"8_CR4","first-page":"3067","volume":"37","author":"PY Chen","year":"2018","unstructured":"Chen, P.Y., et al.: Neurosim: a circuit-level macro model for benchmarking neuro-inspired architectures in online learning. IEEE TCAD 37(12), 3067\u20133080 (2018)","journal-title":"IEEE TCAD"},{"key":"8_CR5","unstructured":"Devlin, J., et al.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Dong, X.y., et al.: NVSim: a circuit-level performance, energy, and area model for emerging nonvolatile memory. TCAD 31(7), 994\u20131007 (2012)","DOI":"10.1109\/TCAD.2012.2185930"},{"key":"8_CR7","doi-asserted-by":"crossref","unstructured":"Ham, T.J., et al.: $$A^3$$: Accelerating attention mechanisms in neural networks with approximation. In: 2020 HPCA. pp. 328\u2013341. IEEE (2020)","DOI":"10.1109\/HPCA47549.2020.00035"},{"issue":"1","key":"8_CR8","doi-asserted-by":"publisher","first-page":"87","DOI":"10.1109\/TPAMI.2022.3152247","volume":"45","author":"K Han","year":"2022","unstructured":"Han, K., et al.: A survey on vision transformer. IEEE Trans. Pattern Anal. Mach. Intell. 45(1), 87\u2013110 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"8_CR9","doi-asserted-by":"crossref","unstructured":"Liu, F., et al.: Spark: scalable and precision-aware acceleration of neural networks via efficient encoding. In: 2024 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pp. 1029\u20131042 (2024)","DOI":"10.1109\/HPCA57654.2024.00082"},{"key":"8_CR10","doi-asserted-by":"crossref","unstructured":"Lu, L., et al.: Sanger: a co-design framework for enabling sparse attention using reconfigurable architecture. In: MICRO-54: 54th Annual IEEE\/ACM International Symposium on Microarchitecture, pp. 977\u2013991 (2021)","DOI":"10.1145\/3466752.3480125"},{"key":"8_CR11","doi-asserted-by":"crossref","unstructured":"Niu, D., et al.: Design of cross-point metal-oxide ReRAM emphasizing reliability and cost. In: 2013 IEEE\/ACM International Conference on Computer-Aided Design (ICCAD), pp. 17\u201323. IEEE (2013)","DOI":"10.1109\/ICCAD.2013.6691092"},{"issue":"2","key":"8_CR12","doi-asserted-by":"publisher","first-page":"604","DOI":"10.1109\/TNNLS.2020.2979670","volume":"32","author":"DW Otter","year":"2020","unstructured":"Otter, D.W., et al.: A survey of the usages of deep learning for natural language processing. IEEE trans. Neural Netw. Learning Syst. 32(2), 604\u2013624 (2020)","journal-title":"IEEE trans. Neural Netw. Learning Syst."},{"issue":"8","key":"8_CR13","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"8_CR14","doi-asserted-by":"crossref","unstructured":"Rajpurkar, P., Zhang, J., Lopyrev, K., Liang, P.: Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250 (2016)","DOI":"10.18653\/v1\/D16-1264"},{"issue":"3","key":"8_CR15","doi-asserted-by":"publisher","first-page":"14","DOI":"10.1145\/3007787.3001139","volume":"44","author":"A Shafiee","year":"2016","unstructured":"Shafiee, A., et al.: ISAAC: a convolutional neural network accelerator with in-situ analog arithmetic in crossbars. ACM SIGARCH Comput. Architect. News 44(3), 14\u201326 (2016)","journal-title":"ACM SIGARCH Comput. Architect. News"},{"key":"8_CR16","doi-asserted-by":"crossref","unstructured":"Song, L.H., et al.: PipeLayer: a pipelined ReRAM-based accelerator for deep learning. In: 2017 HPCA, pp. 541\u2013552. IEEE (2017)","DOI":"10.1109\/HPCA.2017.55"},{"key":"8_CR17","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: SpAtten: efficient sparse attention architecture with cascade token and head pruning. In: 2021 HPCA, pp. 97\u2013110. IEEE (2021)","DOI":"10.1109\/HPCA51647.2021.00018"},{"key":"8_CR18","doi-asserted-by":"crossref","unstructured":"Wen, W., et al.: Renew: enhancing lifetime for ReRAM crossbar based neural network accelerators. In: 2019 IEEE 37th International Conference on Computer Design (ICCD), pp. 487\u2013496. IEEE (2019)","DOI":"10.1109\/ICCD46524.2019.00074"},{"key":"8_CR19","doi-asserted-by":"crossref","unstructured":"Yang, X., et al.: ReTransformer: ReRAM-based processing-in-memory architecture for transformer acceleration. In: Proceedings of the 39th International Conference on Computer-Aided Design, pp. 1\u20139 (2020)","DOI":"10.1145\/3400302.3415640"},{"key":"8_CR20","doi-asserted-by":"crossref","unstructured":"You, H., et al.: ViTCoD: vision transformer acceleration via dedicated algorithm and accelerator co-design. In: 2023 HPCA, pp. 273\u2013286. IEEE (2023)","DOI":"10.1109\/HPCA56546.2023.10071027"},{"key":"8_CR21","doi-asserted-by":"crossref","unstructured":"Zhang, X., et al.: HyAcc: a hybrid CAM-MAC RRAM-based accelerator for recommendation model. In: 2023 ICCD, pp. 375\u2013382. IEEE (2023)","DOI":"10.1109\/ICCD58817.2023.00063"},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Zhou, M., et al.: TransPIM: a memory-based acceleration via software-hardware co-design for transformer. In: 2022 HPCA, pp. 1071\u20131085. IEEE (2022)","DOI":"10.1109\/HPCA53966.2022.00082"},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"Zokaee, F., et al.: Mitigating voltage drop in resistive memories by dynamic reset voltage regulation and partition reset. In: 2020 HPCA, pp. 275\u2013286. IEEE (2020)","DOI":"10.1109\/HPCA47549.2020.00031"}],"container-title":["Lecture Notes in Computer Science","Euro-Par 2024: Parallel Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-69766-1_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,25]],"date-time":"2024-08-25T19:09:32Z","timestamp":1724612972000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-69766-1_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031697654","9783031697661"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-69766-1_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"26 August 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"Euro-Par","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Parallel Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Madrid","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Spain","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"europar2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/2024.euro-par.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}