{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,11]],"date-time":"2024-09-11T08:35:45Z","timestamp":1726043745473},"publisher-location":"Cham","reference-count":21,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030304898"},{"type":"electronic","value":"9783030304904"}],"license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-30490-4_25","type":"book-chapter","created":{"date-parts":[[2019,9,8]],"date-time":"2019-09-08T23:02:47Z","timestamp":1567983767000},"page":"300-312","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Conditional GANs for Image Captioning with Sentiments"],"prefix":"10.1007","author":[{"given":"Tushar","family":"Karayil","sequence":"first","affiliation":[]},{"given":"Asif","family":"Irfan","sequence":"additional","affiliation":[]},{"given":"Federico","family":"Raue","sequence":"additional","affiliation":[]},{"given":"J\u00f6rn","family":"Hees","sequence":"additional","affiliation":[]},{"given":"Andreas","family":"Dengel","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,9,9]]},"reference":[{"key":"25_CR1","doi-asserted-by":"publisher","unstructured":"Blandfort, P., Karayil, T., Borth, D., Dengel, A.: Image captioning in the wild: how people caption images on Flickr. In: Proceedings of the Workshop on Multimodal Understanding of Social, Affective and Subjective Attributes, pp. 21\u201329. ACM (2017), https:\/\/doi.org\/10.1145\/3132515.3132522","DOI":"10.1145\/3132515.3132522"},{"key":"25_CR2","doi-asserted-by":"publisher","unstructured":"Borth, D., Ji, R., Chen, T., Breuel, T., Chang, S.F.: Large-scale visual sentiment ontology and detectors using adjective noun pairs. In: Proceedings of the 21st ACM International Conference on Multimedia, pp. 223\u2013232. ACM (2013). https:\/\/doi.org\/10.1145\/2502081.2502282","DOI":"10.1145\/2502081.2502282"},{"key":"25_CR3","doi-asserted-by":"publisher","unstructured":"Dai, B., Fidler, S., Urtasun, R., Lin, D.: Towards diverse and natural image descriptions via a conditional GAN. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2970\u20132979 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.323","DOI":"10.1109\/ICCV.2017.323"},{"key":"25_CR4","doi-asserted-by":"crossref","unstructured":"Denkowski, M., Lavie, A.: Meteor universal: language specific translation evaluation for any target language. In: Proceedings of the Ninth Workshop on Statistical Machine Translation, pp. 376\u2013380 (2014)","DOI":"10.3115\/v1\/W14-3348"},{"key":"25_CR5","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, pp. 2672\u20132680 (2014)"},{"key":"25_CR6","doi-asserted-by":"publisher","unstructured":"Karayil, T., Blandfort, P., Hees, J., Dengel, A.: The focus-aspect-value model for explainable prediction of subjective visual interpretation. In: International Conference of Multimedia Retrieval (2019). https:\/\/doi.org\/10.1145\/3323873.3325026","DOI":"10.1145\/3323873.3325026"},{"key":"25_CR7","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. Text Summarization Branches Out (2004)"},{"key":"25_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"25_CR9","doi-asserted-by":"crossref","unstructured":"Mathews, A.P., Xie, L., He, X.: SentiCap: generating image descriptions with sentiments. In: Thirtieth AAAI Conference on Artificial Intelligence (2016)","DOI":"10.1609\/aaai.v30i1.10475"},{"key":"25_CR10","unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets. arXiv preprint (2014)"},{"key":"25_CR11","unstructured":"Nezami, O.M., Dras, M., Wan, S., Paris, C.: Senti-attend: image captioning using sentiment and attention. arXiv preprint arXiv:1811.09789 (2018)"},{"key":"25_CR12","doi-asserted-by":"publisher","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, pp. 311\u2013318. Association for Computational Linguistics (2002). https:\/\/doi.org\/10.3115\/1073083.1073135","DOI":"10.3115\/1073083.1073135"},{"key":"25_CR13","doi-asserted-by":"crossref","unstructured":"Sharma, P., Ding, N., Goodman, S., Soricut, R.: Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), vol. 1, pp. 2556\u20132565 (2018)","DOI":"10.18653\/v1\/P18-1238"},{"key":"25_CR14","doi-asserted-by":"crossref","unstructured":"Shin, A., Ushiku, Y., Harada, T.: Image captioning with sentiment terms via weakly-supervised sentiment dataset. In: BMVC (2016)","DOI":"10.5244\/C.30.53"},{"key":"25_CR15","doi-asserted-by":"publisher","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.729908","DOI":"10.1109\/CVPR.2015.729908"},{"key":"25_CR16","doi-asserted-by":"publisher","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3156\u20133164 (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.7298935","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"25_CR17","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057 (2015)"},{"key":"25_CR18","unstructured":"You, Q., Jin, H., Luo, J.: Image captioning at will: a versatile scheme for effectively injecting sentiments into image descriptions. arXiv preprint arXiv:1801.10121 (2018)"},{"key":"25_CR19","doi-asserted-by":"publisher","unstructured":"You, Q., Jin, H., Wang, Z., Fang, C., Luo, J.: Image captioning with semantic attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4651\u20134659 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.503","DOI":"10.1109\/CVPR.2016.503"},{"key":"25_CR20","doi-asserted-by":"crossref","unstructured":"Yu, L., Zhang, W., Wang, J., Yu, Y.: SeqGAN: sequence generative adversarial nets with policy gradient. In: Thirty-First AAAI Conference on Artificial Intelligence (2017)","DOI":"10.1609\/aaai.v31i1.10804"},{"key":"25_CR21","doi-asserted-by":"publisher","unstructured":"Zhang, H., et al: StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5907\u20135915 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.629","DOI":"10.1109\/ICCV.2017.629"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2019: Text and Time Series"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-30490-4_25","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,27]],"date-time":"2022-09-27T21:30:04Z","timestamp":1664314204000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-30490-4_25"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"ISBN":["9783030304898","9783030304904"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-30490-4_25","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2019]]},"assertion":[{"value":"9 September 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Munich","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Germany","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2019","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 September 2019","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19 September 2019","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2019","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2019\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}