{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,31]],"date-time":"2026-01-31T06:53:33Z","timestamp":1769842413338,"version":"3.49.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,4]]},"DOI":"10.1109\/commnet63022.2024.10793270","type":"proceedings-article","created":{"date-parts":[[2024,12,17]],"date-time":"2024-12-17T19:09:17Z","timestamp":1734462557000},"page":"1-10","source":"Crossref","is-referenced-by-count":1,"title":["All You Should Know About Large Language Models (LLMs)"],"prefix":"10.1109","author":[{"given":"Abadila","family":"Alaktif","sequence":"first","affiliation":[{"name":"C3S, ENSEM, EST, Hassan II University,Casablanca,Morocco"}]},{"given":"Meriyem","family":"Chergui","sequence":"additional","affiliation":[{"name":"C3S, ENSEM, EST, Hassan II University,Casablanca,Morocco"}]},{"given":"Imane","family":"Daoudi","sequence":"additional","affiliation":[{"name":"C3S, ENSEM, EST, Hassan II University,Casablanca,Morocco"}]},{"given":"Abdelkrim","family":"Ammoumou","sequence":"additional","affiliation":[{"name":"C3S, ENSEM, EST, Hassan II University,Casablanca,Morocco"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Attention Is All You Need","author":"Vaswani","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref2","article-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","volume-title":"Proc. 
International Conference on Learning Representations (ICLR)","author":"Dosovitskiy"},{"key":"ref3","article-title":"Generating Long Sequences with Sparse Transformers","author":"Child","year":"2019","journal-title":"arXiv preprint arXiv:1904.10509"},{"key":"ref4","article-title":"Longformer: The Long-Document Transformer","author":"Beltagy","year":"2020","journal-title":"arXiv preprint arXiv:2004.05150"},{"key":"ref5","article-title":"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness","author":"Dao","year":"2022","journal-title":"arXiv preprint arXiv:2205.14135"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17664"},{"key":"ref7","article-title":"FMMformer: Efficient and Flexible Transformer via Decomposed Near-field and Far-field Attention","author":"Nguyen","year":"2021","journal-title":"arXiv preprint arXiv:2108.02347"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095821"},{"key":"ref9","article-title":"Linformer: Self-Attention with Linear Complexity","author":"Wang","year":"2020","journal-title":"arXiv preprint arXiv:2006.04768"},{"key":"ref10","article-title":"Rethinking Attention with Performers","author":"Choromanski","year":"2022","journal-title":"arXiv preprint arXiv:2009.14794"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00353"},{"key":"ref12","article-title":"Pointer Sentinel Mixture Models","author":"Merity","year":"2016"},{"key":"ref13","article-title":"Transformers: \u2018The End of History\u2019 for NLP?","author":"Chernyavskiy","year":"2021","journal-title":"arXiv preprint arXiv:2105.00813"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1031"},{"key":"ref15","article-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding","author":"Devlin","year":"2019","journal-title":"arXiv preprint arXiv:1810.04805"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32381-3_16"},{"key":"ref17","article-title":"How transferable are features in deep neural networks?","author":"Yosinski","year":"2014","journal-title":"arXiv preprint arXiv:1411.1792"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"ref19","article-title":"Efficiently Scaling Transformer Inference","author":"Pope","year":"2022","journal-title":"arXiv preprint arXiv:2211.05102"},{"key":"ref20","article-title":"Boosting Nystr\u00f6m Method","author":"Hamm","year":"2023","journal-title":"arXiv preprint arXiv:2302.11032"},{"key":"ref21","article-title":"Notes on the Fast Multipole Method","author":"Kajima","year":"2022","journal-title":"arXiv preprint arXiv:2212.13080"},{"key":"ref22","article-title":"LoRA: Low-Rank Adaptation of Large Language Models","author":"Hu","year":"2021","journal-title":"arXiv preprint arXiv:2106.09685"},{"key":"ref23","article-title":"QLoRA: Efficient Finetuning of Quantized LLMs","author":"Dettmers","year":"2023","journal-title":"arXiv preprint arXiv:2305.14314"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608779"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00299"},{"key":"ref26","article-title":"Clinicallongformer and clinical-bigbird: Transformers for long clinical sequences","author":"Li","year":"2022","journal-title":"arXiv preprint arXiv:2201.11838"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3614812"},{"key":"ref28","article-title":"Flashattention-2: Faster attention with better 
parallelism and work partitioning","author":"Dao","year":"2023","journal-title":"arXiv preprint arXiv:2307.08691"},{"key":"ref29","article-title":"Flashattention-3: Fast and accurate attention with asynchrony and low-precision","author":"Shah","year":"2024","journal-title":"arXiv preprint arXiv:2407.08608"},{"key":"ref30","article-title":"Efficient transformers applied to video classification","author":"Mart\u00ednez P\u00e9rez","year":"2023"},{"key":"ref31","article-title":"Self-Selected Attention Span for Accelerating Large Language Model Inference","author":"Jin","year":"2024","journal-title":"arXiv preprint arXiv:2404.09336"},{"key":"ref32","article-title":"Detecting Fake Content with Relative Entropy Scoring","author":"Lavergne","year":"2008"},{"key":"ref33","article-title":"Revisiting Linformer with a modified self-attention with linear complexity","author":"Verma","year":"2020","journal-title":"arXiv preprint arXiv:2101.10277"},{"key":"ref34","article-title":"HyperAttention and Linformer-Based -catenin Sequence Prediction For Bone Formation","volume":"16","author":"Kumar Yadalam","year":"2024","journal-title":"Cureus"},{"key":"ref35","article-title":"Sub-Linear Memory: How to Make Performers SLiM","author":"Likhosherstov","year":"2020","journal-title":"arXiv preprint arXiv:2012.11346"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.atech.2021.100017"},{"key":"ref37","article-title":"ConvNet-based performers attention and supervised contrastive learning for activity recognition","volume":"53","author":"Hamad","year":"2022","journal-title":"Applied Intelligence"}],"event":{"name":"2024 7th International Conference on Advanced Communication Technologies and Networking (CommNet)","location":"Rabat, Morocco","start":{"date-parts":[[2024,12,4]]},"end":{"date-parts":[[2024,12,6]]}},"container-title":["2024 7th International Conference on Advanced Communication Technologies and Networking (CommNet)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10793106\/10793244\/10793270.pdf?arnumber=10793270","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,18]],"date-time":"2024-12-18T07:17:50Z","timestamp":1734506270000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10793270\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,4]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/commnet63022.2024.10793270","relation":{},"subject":[],"published":{"date-parts":[[2024,12,4]]}}}
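
The record above is a Crossref work entry for the CommNet 2024 paper. As a minimal sketch of how such a record could be retrieved and a few of its fields read, the Python snippet below assumes the public Crossref REST endpoint at api.crossref.org/works/{DOI}; the field names used ("message", "title", "author", "event", "reference") are the ones that appear in the record above, and the script itself is illustrative rather than part of the record.

import json
import urllib.request

# DOI taken from the record above
DOI = "10.1109/commnet63022.2024.10793270"
url = f"https://api.crossref.org/works/{DOI}"

# Fetch the work record; the response is the same JSON structure as shown above
with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

work = record["message"]                                    # the work object ("message" field)
title = work["title"][0]                                    # "All You Should Know About Large Language Models (LLMs)"
authors = [f'{a["given"]} {a["family"]}' for a in work["author"]]
references = work.get("reference", [])                      # 37 entries per "references-count"
event = work.get("event", {}).get("name", "")               # conference name

print(title)
print("; ".join(authors))
print(f"{event}: {len(references)} references")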