{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T12:33:20Z","timestamp":1762432400715,"version":"3.40.3"},"publisher-location":"Cham","reference-count":73,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031407246"},{"type":"electronic","value":"9783031407253"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-40725-3_20","type":"book-chapter","created":{"date-parts":[[2023,8,28]],"date-time":"2023-08-28T23:02:46Z","timestamp":1693263766000},"page":"231-243","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Analysis of\u00a0Transformer Model Applications"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-4749-0882","authenticated-orcid":false,"given":"M. I.","family":"Cabrera-Bermejo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7891-3059","authenticated-orcid":false,"given":"M. J.","family":"Del Jesus","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1062-3127","authenticated-orcid":false,"given":"A. J.","family":"Rivera","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7398-5870","authenticated-orcid":false,"given":"D.","family":"Elizondo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3083-8942","authenticated-orcid":false,"given":"F.","family":"Charte","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6670-564X","authenticated-orcid":false,"given":"M. D.","family":"P\u00e9rez-Godoy","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,29]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Ahmad, W.U., et al.: Gate: graph attention transformer encoder for cross-lingual relation and event extraction. In: Proceedings of the AAAI, vol. 35, no. 14, pp. 12462\u201312470 (2021)","key":"20_CR1","DOI":"10.1609\/aaai.v35i14.17478"},{"doi-asserted-by":"crossref","unstructured":"Alamri, F., et al.: Transformer-encoder detector module: Using context to improve robustness to adversarial attacks on object detection. In: Proceedings of the ICPR, pp. 9577\u20139584 (2021)","key":"20_CR2","DOI":"10.1109\/ICPR48806.2021.9413344"},{"doi-asserted-by":"crossref","unstructured":"Arnab, A., et al.: ViViT: a video vision transformer. In: Proceedings of the ICCV, pp. 6836\u20136846 (2021)","key":"20_CR3","DOI":"10.1109\/ICCV48922.2021.00676"},{"doi-asserted-by":"crossref","unstructured":"Bapna, A., Firat, O.: Simple, scalable adaptation for neural machine translation. In: Proceedings of the EMNLP IJCNLP, pp. 
1538\u20131548 (2019)","key":"20_CR4","DOI":"10.18653\/v1\/N19-1191"},{"doi-asserted-by":"publisher","unstructured":"Cao, H., et al.: Swin-Unet: unet-like pure transformer for medical image segmentation. In: Karlinsky, L., Michaeli, T., Nishino, K. (eds.) Computer Vision \u2013 ECCV 2022 Workshops. ECCV 2022. LNCS, vol. 13803, pp. 205\u2013218. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-25066-8_9","key":"20_CR5","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"20_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"doi-asserted-by":"crossref","unstructured":"Chen, B., et al.: Path-augmented graph transformer network. arXiv:1905.12712 (2019)","key":"20_CR7","DOI":"10.26434\/chemrxiv.8214422"},{"key":"20_CR8","doi-asserted-by":"publisher","first-page":"445","DOI":"10.1016\/j.inffus.2022.10.030","volume":"91","author":"J Chen","year":"2023","unstructured":"Chen, J., et al.: Shape-former: bridging CNN and transformer via ShapeConv for multimodal image matching. Inf. Fusion 91, 445\u2013457 (2023)","journal-title":"Inf. Fusion"},{"doi-asserted-by":"crossref","unstructured":"Chen, W., et al.: Key-sparse transformer for multimodal speech emotion recognition. In: Proceedings of the ICASSP, pp. 6897\u20136901 (2022)","key":"20_CR9","DOI":"10.1109\/ICASSP43922.2022.9746598"},{"doi-asserted-by":"crossref","unstructured":"Chen, X., et al.: Developing real-time streaming transformer transducer for speech recognition on large-scale dataset. In: Proceedings of the IEEE ICASSP, pp. 5904\u20135908 (2021)","key":"20_CR10","DOI":"10.1109\/ICASSP39728.2021.9413535"},{"doi-asserted-by":"crossref","unstructured":"Cornia, M., et al.: Meshed-memory transformer for image captioning. In: Proceedings of the CVPR, pp. 10575\u201310584 (2020)","key":"20_CR11","DOI":"10.1109\/CVPR42600.2020.01059"},{"unstructured":"Da, J., et al.: Edited media understanding frames: reasoning about the intent and implications of visual misinformation. In: Proceedings of the ACL IJCNLP, pp. 2026\u20132039 (2020)","key":"20_CR12"},{"doi-asserted-by":"crossref","unstructured":"Dang, F., et al.: DPT-FSNet: dual-path transformer based full-band and sub-band fusion network for speech enhancement. In: Proceedings of the ICASSP, pp. 6857\u20136861 (2022)","key":"20_CR13","DOI":"10.1109\/ICASSP43922.2022.9746171"},{"unstructured":"Devlin, J., et al.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the NAACL, pp. 4171\u20134186 (2019)","key":"20_CR14"},{"doi-asserted-by":"crossref","unstructured":"Dong, L., et al.: Speech-transformer: a no-recurrence sequence-to-sequence model for speech recognition. In: Proceedings of the IEEE ICASSP, pp. 5884\u20135888 (2018)","key":"20_CR15","DOI":"10.1109\/ICASSP.2018.8462506"},{"unstructured":"Dosovitskiy, A., et al.: An image is worth 16 $$\\times $$ 16 words: transformers for image recognition at scale. 
arXiv:2010.11929 (2020)","key":"20_CR16"},{"doi-asserted-by":"crossref","unstructured":"Gao, D., et al.: FashionBERT: text and image matching with adaptive loss for cross-modal retrieval. In: Proceedings of the ACM SIGIR, pp. 2251\u20132260 (2020)","key":"20_CR17","DOI":"10.1145\/3397271.3401430"},{"unstructured":"Gu, J., et al.: Non-autoregressive neural machine translation. In: Proceedings of the ICLR (2018)","key":"20_CR18"},{"doi-asserted-by":"crossref","unstructured":"Gui, L., et al.: KAT: a knowledge augmented transformer for vision-and-language. In: Proceedings of the NAACL, pp. 956\u2013968 (2022)","key":"20_CR19","DOI":"10.18653\/v1\/2022.naacl-main.70"},{"doi-asserted-by":"crossref","unstructured":"Gulati, A., et al.: Conformer: convolution-augmented transformer for speech recognition. In: Proceedings of the Interspeech, pp. 5036\u20135040 (2020)","key":"20_CR20","DOI":"10.21437\/Interspeech.2020-3015"},{"doi-asserted-by":"crossref","unstructured":"Han, C., et al.: Learning shared semantic space for speech-to-text translation. In: Proceedings of the ACL IJCNLP, pp. 2214\u20132225 (2021)","key":"20_CR21","DOI":"10.18653\/v1\/2021.findings-acl.195"},{"doi-asserted-by":"crossref","unstructured":"Hasan, M.K., et al.: Humor knowledge enriched transformer for understanding multimodal humor. In: Proceedings of the AAAI, vol. 14B, pp. 12972\u201312980 (2021)","key":"20_CR22","DOI":"10.1609\/aaai.v35i14.17534"},{"doi-asserted-by":"crossref","unstructured":"Hatamizadeh, A., et al.: UNETR: transformers for 3D medical image segmentation. In: Proceedings of the IEEE\/CVF WACV, pp. 1748\u20131758 (2022)","key":"20_CR23","DOI":"10.1109\/WACV51458.2022.00181"},{"doi-asserted-by":"crossref","unstructured":"He, Z., et al.: ActionBert: leveraging user actions for semantic understanding of user interfaces. In: Proceedings of the AAAI, vol. 7, pp. 5931\u20135938 (2021)","key":"20_CR24","DOI":"10.1609\/aaai.v35i7.16741"},{"unstructured":"Hollmann, N., et al.: TabPFN: a transformer that solves small tabular classification problems in a second. arXiv:2207.01848 (2022)","key":"20_CR25"},{"doi-asserted-by":"crossref","unstructured":"Hu, R., et al.: Iterative answer prediction with pointer-augmented multimodal transformers for Text-VQA. In: Proceedings of the CVPR, pp. 9989\u20139999 (2020)","key":"20_CR26","DOI":"10.1109\/CVPR42600.2020.01001"},{"unstructured":"Huang, C.Z.A., et al.: Music transformer. arXiv:1809.04281 (2018)","key":"20_CR27"},{"unstructured":"Huang, G., et al.: Multimodal pretraining for dense video captioning. In: Proceedings of the AACL, pp. 470\u2013490 (2020)","key":"20_CR28"},{"doi-asserted-by":"crossref","unstructured":"Ihm, H.R., et al.: Reformer-TTS: neural speech synthesis with reformer network. In: Proceedings of the Interspeech, pp. 2012\u20132016 (2020)","key":"20_CR29","DOI":"10.21437\/Interspeech.2020-2189"},{"doi-asserted-by":"crossref","unstructured":"Iqbal, A., Sharif, M.: BTS-ST: Swin transformer network for segmentation and classification of multimodality breast cancer images. KBS 267, 110393 (2023)","key":"20_CR30","DOI":"10.1016\/j.knosys.2023.110393"},{"doi-asserted-by":"crossref","unstructured":"Ji, J., et al.: Improving image captioning by leveraging intra- and inter-layer global representation in transformer network. In: Proceedings of the AAAI, vol. 35, no. 2, pp. 
1655\u20131663 (2021)","key":"20_CR31","DOI":"10.1609\/aaai.v35i2.16258"},{"doi-asserted-by":"crossref","unstructured":"Jiang, W., et al.: Low complexity speech enhancement network based on frame-level swin transformer. Electronics 12(6) (2023)","key":"20_CR32","DOI":"10.3390\/electronics12061330"},{"unstructured":"Jiang, Y., et al.: TransGAN: two pure transformers can make one strong GAN, and that can scale up. In: Proceedings of the NIPS, vol. 34, pp. 14745\u201314758 (2021)","key":"20_CR33"},{"doi-asserted-by":"crossref","unstructured":"Kim, J., et al.: T-GSA: transformer with gaussian-weighted self-attention for speech enhancement. In: Proceedings of the IEEE ICASSP, pp. 6649\u20136653 (2020)","key":"20_CR34","DOI":"10.1109\/ICASSP40776.2020.9053591"},{"unstructured":"Lan, Z., et al.: Albert: a lite bert for self-supervised learning of language representations. In: Proceedings of the ICLR, pp. 344\u2013350 (2020)","key":"20_CR35"},{"doi-asserted-by":"crossref","unstructured":"Li, B., et al.: Two-stream convolution augmented transformer for human activity recognition. In: Proceedings of the AAAI, vol. 35, no. 1, pp. 286\u2013293 (2021)","key":"20_CR36","DOI":"10.1609\/aaai.v35i1.16103"},{"unstructured":"Li, L.H., et al.: VisualBERT: a simple and performant baseline for vision and language. arXiv:1908.03557 (2019)","key":"20_CR37"},{"doi-asserted-by":"crossref","unstructured":"Li, N., et al.: Neural speech synthesis with transformer network. In: Proceedings of the AAAI, vol. 33, pp. 6706\u20136713 (2019)","key":"20_CR38","DOI":"10.1609\/aaai.v33i01.33016706"},{"doi-asserted-by":"crossref","unstructured":"Li, W., et al.: UNIMO: towards unified-modal understanding and generation via cross-modal contrastive learning. In: Proceedings of the ACL IJCNLP, pp. 2592\u20132607 (2021)","key":"20_CR39","DOI":"10.18653\/v1\/2021.acl-long.202"},{"key":"20_CR40","first-page":"1293","volume":"325","author":"Y Li","year":"2020","unstructured":"Li, Y., Moura, J.M.F.: Forecaster: a graph transformer for forecasting spatial and time-dependent data. Front. Artif. Intell. Appl. 325, 1293\u20131300 (2020)","journal-title":"Front. Artif. Intell. Appl."},{"doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: An efficient transformer decoder with compressed sub-layers. In: Proceedings of the AAAI, vol. 35, no. 15, pp. 13315\u201313323 (2021)","key":"20_CR41","DOI":"10.1609\/aaai.v35i15.17572"},{"key":"20_CR42","doi-asserted-by":"publisher","first-page":"111","DOI":"10.1016\/j.aiopen.2022.10.001","volume":"3","author":"T Lin","year":"2022","unstructured":"Lin, T., et al.: A survey of transformers. AI Open 3, 111\u2013132 (2022)","journal-title":"AI Open"},{"doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the ICCV, pp. 10012\u201310022 (2021)","key":"20_CR43","DOI":"10.1109\/ICCV48922.2021.00986"},{"doi-asserted-by":"crossref","unstructured":"Luo, Y., et al.: Dual-level collaborative transformer for image captioning. In: Proceedings of the AAAI, vol. 35, no. 3, pp. 2286\u20132293 (2021)","key":"20_CR44","DOI":"10.1609\/aaai.v35i3.16328"},{"unstructured":"Maziarka, L., et al.: Molecule attention transformer. arXiv:2002.08264 (2020)","key":"20_CR45"},{"doi-asserted-by":"crossref","unstructured":"Michalopoulos, G., et al.: UmlsBERT: clinical domain knowledge augmentation of contextual embeddings using the unified medical language system metathesaurus. In: Proceedings of the NAACL, pp. 
1744\u20131753 (2021)","key":"20_CR46","DOI":"10.18653\/v1\/2021.naacl-main.139"},{"unstructured":"Mohamed, A., et al.: Transformers with convolutional context for asr. arXiv:1904.11660 (2019)","key":"20_CR47"},{"unstructured":"Parmar, N., et al.: Image transformer. In: Proceedings of the ICML, vol. 80, pp. 4055\u20134064 (2018)","key":"20_CR48"},{"doi-asserted-by":"crossref","unstructured":"Prakash, P., et al.: RareBERT: transformer architecture for rare disease patient identification using administrative claims. In: Proceedings of the AAAI, vol. 35, no. 1, pp. 453\u2013460 (2021)","key":"20_CR49","DOI":"10.1609\/aaai.v35i1.16122"},{"unstructured":"Qi, D., et al.: ImageBERT: cross-modal pre-training with large-scale weak-supervised image-text data. arXiv:2001.07966 (2020)","key":"20_CR50"},{"unstructured":"Qin, Z., et al.: cosFormer: rethinking softmax in attention. arXiv:2202.08791 (2022)","key":"20_CR51"},{"key":"20_CR52","doi-asserted-by":"publisher","first-page":"53","DOI":"10.1162\/tacl_a_00353","volume":"9","author":"A Roy","year":"2021","unstructured":"Roy, A., et al.: Efficient content-based sparse attention with routing transformers. TACL 9, 53\u201368 (2021)","journal-title":"TACL"},{"doi-asserted-by":"crossref","unstructured":"Song, H., et al.: Attend and diagnose: clinical time series analysis using attention models. In: Proceedings of the AAAI, pp. 4091\u20134098 (2018)","key":"20_CR53","DOI":"10.1609\/aaai.v32i1.11635"},{"unstructured":"Su, W., et al.: VL-BERT: pre-training of generic visual-linguistic representations. arXiv:1908.08530 (2019)","key":"20_CR54"},{"doi-asserted-by":"crossref","unstructured":"Subakan, C., et al.: Attention is all you need in speech separation. In: Proceedings of the IEEE ICASSP, pp. 21\u201325 (2021)","key":"20_CR55","DOI":"10.1109\/ICASSP39728.2021.9413901"},{"doi-asserted-by":"crossref","unstructured":"Sun, C., et al.: VideoBERT: a joint model for video and language representation learning. In: Proceedings of the ICCV, pp. 7463\u20137472 (2019)","key":"20_CR56","DOI":"10.1109\/ICCV.2019.00756"},{"doi-asserted-by":"crossref","unstructured":"Sun, L., et al.: RpBERT: a text-image relation propagation-based BERT model for multimodal NER. In: Proceedings of the AAAI, vol. 15, pp. 13860\u201313868 (2021)","key":"20_CR57","DOI":"10.1609\/aaai.v35i15.17633"},{"doi-asserted-by":"crossref","unstructured":"Taubner, F., et al.: LCD - line clustering and description for place recognition. In: Proceedings of the 3DV, pp. 908\u2013917 (2020)","key":"20_CR58","DOI":"10.1109\/3DV50981.2020.00101"},{"unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of the NIPS, vol. 30, pp. 5999\u20136009 (2017)","key":"20_CR59"},{"doi-asserted-by":"crossref","unstructured":"Wang, C., et al.: Semi-autoregressive neural machine translation. In: Proceedings of the EMNLP, pp. 479\u2013488 (2018)","key":"20_CR60","DOI":"10.18653\/v1\/D18-1044"},{"doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Non-autoregressive machine translation with auxiliary regularization. In: Proceedings of the AAAI, pp. 5377\u20135384 (2019)","key":"20_CR61","DOI":"10.1609\/aaai.v33i01.33015377"},{"unstructured":"Wolf, T., et al.: TransferTransfo: a transfer learning approach for neural network based conversational agents. 
arXiv:1901.08149 (2019)","key":"20_CR62"},{"key":"20_CR63","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41524-023-01016-5","volume":"9","author":"C Xu","year":"2023","unstructured":"Xu, C., et al.: Transpolymer: a transformer-based language model for polymer property predictions. NPJ Comput. Mater. 9, 1\u201314 (2023)","journal-title":"NPJ Comput. Mater."},{"unstructured":"Yan, H., et al.: Tener: adapting transformer encoder for named entity recognition. arXiv:1911.04474 (2019)","key":"20_CR64"},{"doi-asserted-by":"crossref","unstructured":"Ye, H., et al.: Contrastive triple extraction with generative transformer. In: Proceedings of the AAAI, vol. 35, no. 16, pp. 14257\u201314265 (2021)","key":"20_CR65","DOI":"10.1609\/aaai.v35i16.17677"},{"unstructured":"Yu, A.W., et al.: Fast and accurate reading comprehension by combining self-attention and convolution. In: Proceedings of the ICLR (2018)","key":"20_CR66"},{"key":"20_CR67","doi-asserted-by":"publisher","first-page":"1152","DOI":"10.1007\/s12559-020-09817-2","volume":"14","author":"W Yu","year":"2022","unstructured":"Yu, W., et al.: Setransformer: speech enhancement transformer. Cogn. Comput. 14, 1152\u20131158 (2022)","journal-title":"Cogn. Comput."},{"doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: Token shift transformer for video classification. In: Proceedings of the ACM MM, pp. 917\u2013925 (2021)","key":"20_CR68","DOI":"10.1145\/3474085.3475272"},{"key":"20_CR69","doi-asserted-by":"publisher","first-page":"1141","DOI":"10.1007\/s11263-022-01739-w","volume":"131","author":"Q Zhang","year":"2023","unstructured":"Zhang, Q., et al.: ViTAEv2: vision transformer advanced by exploring inductive bias for image recognition and beyond. IJCV 131, 1141\u20131162 (2023)","journal-title":"IJCV"},{"doi-asserted-by":"crossref","unstructured":"Zheng, Y., et al.: Improving end-to-end speech synthesis with local recurrent neural network enhanced transformer. In: Proceedings of the IEEE ICASSP, pp. 6734\u20136738 (2020)","key":"20_CR70","DOI":"10.1109\/ICASSP40776.2020.9054148"},{"doi-asserted-by":"crossref","unstructured":"Zhou, H., et al.: Informer: beyond efficient transformer for long sequence time-series forecasting. In: Proceedings of the AAAI, vol. 35, pp. 11106\u201311115 (2021)","key":"20_CR71","DOI":"10.1609\/aaai.v35i12.17325"},{"doi-asserted-by":"crossref","unstructured":"Zhou, L., Luo, Y.: Deep features fusion with mutual attention transformer for skin lesion diagnosis. In: Proceedings of the ICIP, pp. 3797\u20133801 (2021)","key":"20_CR72","DOI":"10.1109\/ICIP42928.2021.9506211"},{"unstructured":"Zhu, X., et al.: Deformable DETR: deformable transformers for end-to-end object detection. In: Proceedings of the ICLR, pp. 
1\u201316 (2021)","key":"20_CR73"}],"container-title":["Lecture Notes in Computer Science","Hybrid Artificial Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-40725-3_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,8]],"date-time":"2024-05-08T06:04:54Z","timestamp":1715148294000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-40725-3_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031407246","9783031407253"],"references-count":73,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-40725-3_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"29 August 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"HAIS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Hybrid Artificial Intelligence Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Salamanca","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Spain","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"hais2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2023.haisconference.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"120","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"65","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information 
(provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"54% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
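Records like this one are served by the Crossref REST API at https://api.crossref.org/works/{DOI}, with the payload under the top-level "message" key, exactly as in the record above. The following is a minimal sketch of retrieving and reading this record; it assumes network access and the third-party `requests` package, and the script name and mailto address in the User-Agent are placeholders (a contact address opts the caller into Crossref's polite pool).

```python
import requests

DOI = "10.1007/978-3-031-40725-3_20"

# Fetch the work record; the User-Agent contact address is a placeholder.
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "example-script/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()
msg = resp.json()["message"]  # the work record sits under "message"

print(msg["title"][0])  # "Analysis of Transformer Model Applications"
print(len(msg.get("reference", [])), "references listed,",
      msg["references-count"], "declared")
for a in msg["author"]:
    # "given"/"family" are always present here; ORCID may be absent in general.
    print(f'{a["family"]}, {a["given"]}', a.get("ORCID", ""))
```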
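The peer-review assertions spell out how the 54% acceptance rate is derived: number of full papers accepted divided by number of submissions sent for review, times 100, rounded to a whole number (65 / 120 * 100 = 54.17, rounded to 54). A small self-contained sketch of that computation from the record's "assertion" list follows; the `assertion_value` helper is hypothetical, and the embedded values are copied from the record above.

```python
# Values copied from the record's "assertion" block (Crossref stores them as strings).
message = {
    "assertion": [
        {"name": "number_of_submissions_sent_for_review", "value": "120"},
        {"name": "number_of_full_papers_accepted", "value": "65"},
    ]
}

def assertion_value(msg: dict, name: str) -> str:
    # Assertions are a flat list of {"name": ..., "value": ...} pairs.
    return next(a["value"] for a in msg["assertion"] if a["name"] == name)

accepted = int(assertion_value(message, "number_of_full_papers_accepted"))
sent = int(assertion_value(message, "number_of_submissions_sent_for_review"))

# 65 / 120 * 100 = 54.166..., rounded to a whole number -> 54, matching the record.
print(f"Acceptance rate of full papers: {round(accepted / sent * 100)}%")
```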