{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T15:10:53Z","timestamp":1761664253745},"reference-count":36,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,5,12]]},"DOI":"10.23919\/fruct52173.2021.9435534","type":"proceedings-article","created":{"date-parts":[[2021,5,25]],"date-time":"2021-05-25T20:10:41Z","timestamp":1621973441000},"page":"3-13","source":"Crossref","is-referenced-by-count":2,"title":["Deep Image Captioning Survey: A Resource Availability Perspective"],"prefix":"10.23919","author":[{"given":"Mousa Al","family":"Sulaimi","sequence":"first","affiliation":[]},{"given":"Imtiaz","family":"Ahmad","sequence":"additional","affiliation":[]},{"given":"Mohammad","family":"Jeragh","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/128"},{"key":"ref32","first-page":"1","article-title":"Variational graph auto-encoders","author":"kipf","year":"0","journal-title":"Proc NIPS Workshop Bayesian Deep Learn"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01042"},{"key":"ref30","article-title":"Partially-Supervised Image Captioning","author":"anderson","year":"2019","journal-title":"ArXiv org"},{"journal-title":"Wasserstein GAN","year":"2017","author":"arjovsky","key":"ref36"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref34","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.445"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.323"},{"key":"ref12","article-title":"Image Captioning based on Deep Learning Methods: A Survey","author":"wang","year":"2019","journal-title":"ArXiv org"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2018.05.080"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-018-1566-y"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1613\/jair.4900"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.3233\/HIS-170246"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01418-6_18"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01071"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref28","article-title":"Show, Tell and Discriminate: Image Captioning by Self-retrieval with Partially Labeled Data","author":"liu","year":"2019","journal-title":"ArXiv org"},{"key":"ref4","article-title":"Neural Machine Translation by Jointly Learning to Align and Translate","author":"bahdanau","year":"2019","journal-title":"ArXiv org"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123366"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref6","article-title":"Show, Attend and Tell: Neural Image Caption Generation with Visual Attention","author":"xu","year":"2019","journal-title":"ArXiv org"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1208"},{"key":"ref5","article-title":"Show and Tell: A Neural Image Caption Generator","author":"vinyals","year":"2019","journal-title":"ArXiv org"},{"key":"ref8","article-title":"Unsupervised Image Captioning","author":"feng","year":"2018","journal-title":"ArXiv org"},{"key":"ref7","first-page":"2672","article-title":"Generative Adversarial Nets","author":"goodfellow","year":"2014","journal-title":"Advances in Neural Information Processing Systems 27 (NIPS 2014)"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3295748"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00751"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1006\/jvci.1999.0413"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.64"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2869276"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3132847.3132920"},{"key":"ref24","article-title":"Dual Learning for Machine Translation","author":"xia","year":"2019","journal-title":"ArXiv org"},{"journal-title":"Diverse and Accurate Image Description Using a Variational Auto-Encoder with an Additive Gaussian Encoding Space","year":"2017","author":"wang","key":"ref23"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350996"},{"key":"ref25","article-title":"Unpaired Image Captioning by Language Pivoting","author":"gu","year":"2019","journal-title":"ArXiv org"}],"event":{"name":"2021 29th Conference of Open Innovations Association (FRUCT)","start":{"date-parts":[[2021,5,12]]},"location":"Tampere, Finland","end":{"date-parts":[[2021,5,14]]}},"container-title":["2021 29th Conference of Open Innovations Association (FRUCT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9435428\/9435420\/09435534.pdf?arnumber=9435534","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,6,14]],"date-time":"2021-06-14T20:39:11Z","timestamp":1623703151000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9435534\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,12]]},"references-count":36,"URL":"https:\/\/doi.org\/10.23919\/fruct52173.2021.9435534","relation":{},"subject":[],"published":{"date-parts":[[2021,5,12]]}}}