{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T07:54:32Z","timestamp":1742975672592,"version":"3.40.3"},"publisher-location":"Cham","reference-count":15,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031606649"},{"type":"electronic","value":"9783031606656"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-60665-6_11","type":"book-chapter","created":{"date-parts":[[2024,6,27]],"date-time":"2024-06-27T19:21:12Z","timestamp":1719516072000},"page":"151-162","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["A Vision Transformer Approach to Fundus Image Classification"],"prefix":"10.1007","author":[{"given":"Danilo","family":"Leite","sequence":"first","affiliation":[]},{"given":"Jos\u00e9","family":"Camara","sequence":"additional","affiliation":[]},{"given":"Jo\u00e3o","family":"Rodrigues","sequence":"additional","affiliation":[]},{"given":"Ant\u00f3nio","family":"Cunha","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,6,28]]},"reference":[{"issue":"11","key":"11_CR1","doi-asserted-by":"publisher","first-page":"2081","DOI":"10.1016\/j.ophtha.2014.05.013","volume":"121","author":"YC Tham","year":"2014","unstructured":"Tham, Y.C., Li, X., Wong, T.Y., Quigley, H.A., Aung, T., Cheng, C.Y.: Global prevalence of glaucoma and projections of glaucoma burden through 2040: a systematic review and meta-analysis. Ophthalmology 121(11), 2081\u20132090 (2014). https:\/\/doi.org\/10.1016\/j.ophtha.2014.05.013","journal-title":"Ophthalmology"},{"issue":"13","key":"11_CR2","doi-asserted-by":"publisher","first-page":"3850","DOI":"10.3390\/JCM11133850","volume":"11","author":"J Camara","year":"2022","unstructured":"Camara, J., Rezende, R., Pires, I.M., Cunha, A.: Retinal glaucoma public datasets: what do we have and what is missing? J. Clin. Med. 11(13), 3850 (2022). https:\/\/doi.org\/10.3390\/JCM11133850","journal-title":"J. Clin. Med."},{"issue":"2021","key":"11_CR3","doi-asserted-by":"publisher","first-page":"485","DOI":"10.1016\/j.procs.2021.12.040","volume":"196","author":"A Neto","year":"2021","unstructured":"Neto, A., Camera, J., Oliveira, S., Cl\u00e1udia, A., Cunha, A.: Optic disc and cup segmentations for glaucoma assessment using cup-to-disc ratio. Proc. Comput. Sci. 196(2021), 485\u2013492 (2021). https:\/\/doi.org\/10.1016\/j.procs.2021.12.040","journal-title":"Proc. Comput. Sci."},{"key":"11_CR4","doi-asserted-by":"publisher","first-page":"100233","DOI":"10.1016\/j.xops.2022.100233","volume":"3","author":"R Fan","year":"2023","unstructured":"Fan, R., et al.: Detecting glaucoma from fundus photographs using deep learning without convolutions transformer for improved generalization. Ophthalmol. Sci. 3, 100233 (2023). 
https:\/\/doi.org\/10.1016\/j.xops.2022.100233","journal-title":"Ophthalmol. Sci."},{"key":"11_CR5","doi-asserted-by":"publisher","unstructured":"He, J., Wang, J., Han, Z., Ma, J., Wang, C., Qi, M.: An interpretable transformer network for the retinal disease classification using optical coherence tomography. Sci. Rep. 13, 3637. https:\/\/doi.org\/10.1038\/s41598-023-30853-z. 123AD","DOI":"10.1038\/s41598-023-30853-z"},{"key":"11_CR6","doi-asserted-by":"publisher","unstructured":"D\u2019Souza, G., Siddalingaswamy, P.C., Pandya, M.A.: AlterNet-K: a small and compact model for the detection of glaucoma 1, 3. https:\/\/doi.org\/10.1007\/s13534-023-00307-6","DOI":"10.1007\/s13534-023-00307-6"},{"key":"11_CR7","unstructured":"Karrothu, A., Chunduru, A.: Glaucoma detection using computer vision and vision transformers (2023). https:\/\/journal.uob.edu.bh\/handle\/123456789\/5206. Accessed 13 Sept 2023"},{"key":"11_CR8","unstructured":"Nakayama, L.F., et al.: A Brazilian multilabel ophthalmological dataset (BRSET) v1.0.0 (2023). https:\/\/physionet.org\/content\/brazilian-ophthalmological\/1.0.0\/. Accessed 13 Sept 2023"},{"issue":"2","key":"11_CR9","doi-asserted-by":"publisher","first-page":"413","DOI":"10.1109\/TMI.2019.2927226","volume":"39","author":"L Li","year":"2020","unstructured":"Li, L., et al.: A large-scale database and a CNN model for attention-based glaucoma detection. IEEE Trans. Med. Imaging 39(2), 413\u2013424 (2020). https:\/\/doi.org\/10.1109\/TMI.2019.2927226","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"10","key":"11_CR10","doi-asserted-by":"publisher","first-page":"2702","DOI":"10.4103\/IJO.IJO_92_21","volume":"69","author":"S Ajitha","year":"2021","unstructured":"Ajitha, S., Akkara, J.D., Judy, M.V.: Identification of glaucoma from fundus images using deep learning techniques. Indian J. Ophthalmol. 69(10), 2702\u20132709 (2021). https:\/\/doi.org\/10.4103\/IJO.IJO_92_21","journal-title":"Indian J. Ophthalmol."},{"key":"11_CR11","doi-asserted-by":"publisher","unstructured":"Teixeira, I., Morais, R., Sousa, J.J., Cunha, A.: Deep learning models for the classification of crops in aerial imagery: a review. Agriculture 13(5) (2023). https:\/\/doi.org\/10.3390\/AGRICULTURE13050965","DOI":"10.3390\/AGRICULTURE13050965"},{"key":"11_CR12","unstructured":"Dosovitskiy, A., et al.: An image is worth 16\u00d716 words: transformers for image recognition at scale. In: ICLR 2021 - 9th International Conference on Learning Representations, October 2020. Accessed 14 Sept 2023. https:\/\/arxiv.org\/abs\/2010.11929v2"},{"key":"11_CR13","doi-asserted-by":"publisher","unstructured":"Wassel, M., Hamdi, A.M., Adly, N., Torki, M.: Vision transformers based classification for glaucomatous eye condition. In: Proceedings - International Conference on Pattern Recognition, vol. 2022-August, pp. 5082\u20135088 (2022). https:\/\/doi.org\/10.1109\/ICPR56361.2022.9956086","DOI":"10.1109\/ICPR56361.2022.9956086"},{"issue":"2021","key":"11_CR14","doi-asserted-by":"publisher","first-page":"454","DOI":"10.1016\/j.procs.2021.12.036","volume":"196","author":"D Leite","year":"2021","unstructured":"Leite, D., et al.: Machine Learning automatic assessment for glaucoma and myopia based on Corvis ST data. Proc. Comput. Sci. 196(2021), 454\u2013460 (2021). https:\/\/doi.org\/10.1016\/j.procs.2021.12.036","journal-title":"Proc. Comput. 
Sci."},{"key":"11_CR15","doi-asserted-by":"publisher","unstructured":"Leite, D.R.A., de Moraes, R.M., Lopes, L.W.: Different performances of machine learning models to classify dysphonic and non-dysphonic voices (2022). https:\/\/doi.org\/10.1016\/j.jvoice.2022.11.001","DOI":"10.1016\/j.jvoice.2022.11.001"}],"container-title":["Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering","Wireless Mobile Communication and Healthcare"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-60665-6_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,27]],"date-time":"2024-06-27T19:33:27Z","timestamp":1719516807000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-60665-6_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031606649","9783031606656"],"references-count":15,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-60665-6_11","relation":{},"ISSN":["1867-8211","1867-822X"],"issn-type":[{"type":"print","value":"1867-8211"},{"type":"electronic","value":"1867-822X"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"28 June 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MobiHealth","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Wireless Mobile Communication and Healthcare","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vila Real","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Portugal","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 November 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 November 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mobihealth2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/mobihealth.eai-conferences.org\/2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Confy +","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"111","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review 
Information (provided by the conference organizers)"}},{"value":"33","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"30% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
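The record above is the raw "work" payload that the public Crossref REST API returns for this chapter's DOI. Below is a minimal sketch of how such a record can be fetched and unpacked. It assumes Python with the third-party requests package installed and network access to api.crossref.org; the mailto value is a hypothetical placeholder that opts the request into Crossref's polite pool.

```python
import requests

# Fetch the Crossref work record for this chapter by its DOI.
DOI = "10.1007/978-3-031-60665-6_11"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.org"},  # hypothetical contact; polite-pool etiquette
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # same structure as the record shown above

# Unpack a few of the fields present in the record.
title = work["title"][0]
authors = ", ".join(f"{a['given']} {a['family']}" for a in work["author"])
year = work["issued"]["date-parts"][0][0]
print(f"{authors}: {title} ({year})")
print(f"Deposited references: {work['references-count']}")
for ref in work.get("reference", []):
    # Some entries (e.g. 11_CR7, 11_CR8) carry no DOI, only an unstructured string.
    print(" -", ref.get("DOI") or ref.get("unstructured", "")[:60])
```

Run against the live API, this should print the four authors, the chapter title and year, and the 15 deposited references, matching the fields in the record above.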