{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,6]],"date-time":"2025-06-06T09:49:22Z","timestamp":1749203362400,"version":"3.40.3"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031366154"},{"type":"electronic","value":"9783031366161"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-36616-1_18","type":"book-chapter","created":{"date-parts":[[2023,6,24]],"date-time":"2023-06-24T18:03:41Z","timestamp":1687629821000},"page":"223-237","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Learning to\u00a0Search for\u00a0and Detect Objects in\u00a0Foveal Images Using Deep Learning"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6153-7838","authenticated-orcid":false,"given":"Beatriz","family":"Paula","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0496-2050","authenticated-orcid":false,"given":"Plinio","family":"Moreno","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,6,25]]},"reference":[{"issue":"1","key":"18_CR1","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1109\/TPAMI.2012.89","volume":"35","author":"A Borji","year":"2012","unstructured":"Borji, A., Itti, L.: 
State-of-the-art in visual attention modeling. IEEE Trans. Pattern Anal. Mach. Intell. 35(1), 185\u2013207 (2012)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Bandera, C., Scott, P.D.: Foveal machine vision systems. In Conference Proceedings. In: IEEE International Conference on Systems, Man and Cybernetics, pp. 596\u2013599. IEEE (1989)","DOI":"10.1109\/ICSMC.1989.71367"},{"issue":"11","key":"18_CR3","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"issue":"6","key":"18_CR4","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. Commun. ACM 60(6), 84\u201390 (2017)","journal-title":"Commun. ACM"},{"issue":"2","key":"18_CR5","doi-asserted-by":"publisher","first-page":"119","DOI":"10.1016\/0893-6080(88)90014-7","volume":"1","author":"K Fukushima","year":"1988","unstructured":"Fukushima, K.: Neocognitron: a hierarchical neural network capable of visual pattern recognition. Neural Netw. 1(2), 119\u2013130 (1988)","journal-title":"Neural Netw."},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Akbas, E., Eckstein, M.P.: Object detection through search with a foveated visual system. PLoS Comput. Biol. 13(10), e1005743 (2017)","DOI":"10.1371\/journal.pcbi.1005743"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"James, W.: The Principles of Psychology, vol. 1. Henry Holt and Co. 
(1890)","DOI":"10.1037\/10538-000"},{"issue":"3","key":"18_CR8","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1038\/nrn755","volume":"3","author":"M Corbetta","year":"2002","unstructured":"Corbetta, M., Shulman, G.L.: Control of goal-directed and stimulus-driven attention in the brain. Nat. Rev. Neurosci. 3(3), 201\u2013215 (2002)","journal-title":"Nat. Rev. Neurosci."},{"key":"18_CR9","volume-title":"Eye Movements and Vision","author":"AL Yarbus","year":"2013","unstructured":"Yarbus, A.L.: Eye Movements and Vision. Springer, Heidelberg (2013)"},{"key":"18_CR10","doi-asserted-by":"crossref","unstructured":"Ngo, T., Manjunath, B.S.: Saccade gaze prediction using a recurrent neural network. In: 2017 IEEE International Conference on Image Processing (ICIP), pp. 3435\u20133439. IEEE (2017)","DOI":"10.1109\/ICIP.2017.8296920"},{"key":"18_CR11","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1007\/978-3-642-24797-2_4","volume-title":"Supervised Sequence Labelling with Recurrent Neural Networks","author":"A Graves","year":"2012","unstructured":"Graves, A.: Long short-term memory. In: Graves, A. (ed.) Supervised Sequence Labelling with Recurrent Neural Networks, vol. 385, pp. 37\u201345. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-24797-2_4"},{"key":"18_CR12","unstructured":"Kreiman, G., Zhang, M.: Finding any Waldo: zero-shot invariant and efficient visual search (2018)"},{"key":"18_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1007\/978-3-030-50347-5_25","volume-title":"Image Analysis and Recognition","author":"A Nunes","year":"2020","unstructured":"Nunes, A., Figueiredo, R., Moreno, P.: Learning to search for objects in images from human gaze sequences. In: Campilho, A., Karray, F., Wang, Z. (eds.) ICIAR 2020. LNCS, vol. 12131, pp. 280\u2013292. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-50347-5_25"},{"key":"18_CR14","unstructured":"Shi, X., Chen, Z., Wang, H., Yeung, D.Y., Wong, W.K., Woo, W.C.: Convolutional LSTM network: A machine learning approach for precipitation nowcasting. Adv. Neural Inf. Process. Syst. 28 (2015)"},{"issue":"1","key":"18_CR15","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1038\/s41598-020-79139-8","volume":"11","author":"Y Chen","year":"2021","unstructured":"Chen, Y., Yang, Z., Ahn, S., Samaras, D., Hoai, M., Zelinsky, G.: COCO-search18 fixation dataset for predicting goal-directed attention control. Sci. Rep. 11(1), 1\u201311 (2021)","journal-title":"Sci. Rep."},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: Predicting goal-directed human attention using inverse reinforcement learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 193\u2013202 (2020)","DOI":"10.1109\/CVPR42600.2020.00027"},{"key":"18_CR17","doi-asserted-by":"crossref","unstructured":"Kirillov, A., He, K., Girshick, R., Rother, C., Doll\u00e1r, P.: Panoptic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9404\u20139413 (2019)","DOI":"10.1109\/CVPR.2019.00963"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Girshick, R., He, K., Doll\u00e1r, P.: Panoptic feature pyramid networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6399\u20136408 (2019)","DOI":"10.1109\/CVPR.2019.00656"},{"key":"18_CR19","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 
248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"18_CR21","unstructured":"Cabarr\u00e3o, B.: Learning to search for objects in foveal images using deep learning, Master\u2019s thesis, Universidade de Lisboa - Instituto Superior T\u00e9cnico (2022)"},{"key":"18_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"18_CR23","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Image Analysis"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-36616-1_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,22]],"date-time":"2024-10-22T23:47:18Z","timestamp":1729640838000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-36616-1_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031366154","9783031366161"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-36616-1_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"25 June 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter 
History"}},{"value":"IbPRIA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Iberian Conference on Pattern Recognition and Image Analysis","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Alicante","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Spain","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 June 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 June 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ibpria2022b","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easy Chair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"86","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for 
Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"56","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"65% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.9","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}