{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T19:38:21Z","timestamp":1740166701596,"version":"3.37.3"},"reference-count":85,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2020,10,26]],"date-time":"2020-10-26T00:00:00Z","timestamp":1603670400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,10,26]],"date-time":"2020-10-26T00:00:00Z","timestamp":1603670400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Multimed Info Retr"],"published-print":{"date-parts":[[2020,12]]},"DOI":"10.1007\/s13735-020-00198-8","type":"journal-article","created":{"date-parts":[[2020,10,26]],"date-time":"2020-10-26T21:04:03Z","timestamp":1603746243000},"page":"291-316","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["MRECN: mixed representation enhanced (de)compositional network for caption generation from visual features, modeling as pseudo tensor product representation"],"prefix":"10.1007","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1563-9304","authenticated-orcid":false,"given":"Chiranjib","family":"Sur","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,10,26]]},"reference":[{"issue":"22","key":"198_CR1","doi-asserted-by":"publisher","first-page":"32187","DOI":"10.1007\/s11042-019-08021-1","volume":"78","author":"C Sur","year":"2019","unstructured":"Sur C (2019) Survey of deep learning and architectures for visual captioning\u2013transitioning between media and natural languages. Multimed Tools Appl 78(22):32187\u201332237","journal-title":"Multimed Tools Appl"},{"key":"198_CR2","doi-asserted-by":"crossref","unstructured":"Karpathy A, Fei-Fei L (2015) Deep visual-semantic alignments for generating image descriptions. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"198_CR3","doi-asserted-by":"crossref","unstructured":"Chen X, Lawrence Zitnick C (2015) Mind\u2019s eye: a recurrent visual representation for image caption generation. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298856"},{"key":"198_CR4","unstructured":"Devlin J, Gupta S, Girshick R, Mitchell M, Zitnick CL (2015) Exploring nearest neighbor approaches for image captioning. arXiv:1505.04467"},{"key":"198_CR5","unstructured":"Xu K et al (2015) Show, attend and tell: neural image caption generation with visual attention. In: International conference on machine learning"},{"key":"198_CR6","doi-asserted-by":"crossref","unstructured":"Vinyals O et al (2015) Show and tell: A neural image caption generator. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"198_CR7","unstructured":"Mao J et al (2014) Deep captioning with multimodal recurrent neural networks (m-rnn). arXiv preprint arXiv:1412.6632"},{"key":"198_CR8","doi-asserted-by":"crossref","unstructured":"Devlin J et al (2015) Language models for image captioning: the quirks and what works. 
arXiv:1505.01809","DOI":"10.3115\/v1\/P15-2017"},{"key":"198_CR9","doi-asserted-by":"crossref","unstructured":"Yao T, Pan Y, Li Y, Qiu Z, Mei T (2017) Boosting image captioning with attributes. In: IEEE international conference on computer vision, ICCV, pp 22\u201329","DOI":"10.1109\/ICCV.2017.524"},{"key":"198_CR10","doi-asserted-by":"crossref","unstructured":"Rennie SJ, Marcheret E, Mroueh Y, Ross J, Goel V (2017) Self-critical sequence training for image captioning. In: CVPR, vol 1, issue 2, p 3","DOI":"10.1109\/CVPR.2017.131"},{"key":"198_CR11","doi-asserted-by":"crossref","unstructured":"Chen H, Ding G, Lin Z, Zhao S, Han J (2018) Show, observe and tell: attribute-driven attention model for image captioning. In: IJCAI, pp 606\u2013612","DOI":"10.24963\/ijcai.2018\/84"},{"key":"198_CR12","doi-asserted-by":"crossref","unstructured":"Gan Z et al (2016) Semantic compositional networks for visual captioning. arXiv:1611.08002","DOI":"10.1109\/CVPR.2017.127"},{"key":"198_CR13","doi-asserted-by":"crossref","unstructured":"Anderson P, He X, Buehler C, Teney D, Johnson M, Gould S, Zhang L (2018) Bottom-up and top-down attention for image captioning and visual question answering. In: CVPR, vol. 3, issue 5, p 6","DOI":"10.1109\/CVPR.2018.00636"},{"key":"198_CR14","unstructured":"Sur C (2020) SACT: self-aware multi-space feature composition transformer for multinomial attention for video captioning. arXiv:2006.14262"},{"key":"198_CR15","unstructured":"Sur C (2020) Self-segregating and coordinated-segregating transformer for focused deep multi-modular network for visual question answering. arXiv:2006.14264"},{"key":"198_CR16","unstructured":"Sur C (2020) ReLGAN: generalization of consistency for gan with disjoint constraints and relative learning of generative processes for multiple transformation learning. arXiv:2006.07809"},{"key":"198_CR17","doi-asserted-by":"publisher","first-page":"229","DOI":"10.1007\/s42979-020-00238-4","volume":"1","author":"C Sur","year":"2020","unstructured":"Sur C (2020) AACR: feature fusion effects of algebraic amalgamation composed representation on (de)compositional network for caption generation for images. SN Comput Sci 1:229. https:\/\/doi.org\/10.1007\/s42979-020-00238-4","journal-title":"SN Comput Sci"},{"key":"198_CR18","unstructured":"Sur C (2020) Gaussian smoothen semantic features (GSSF)\u2014exploring the linguistic aspects of visual captioning in Indian languages (Bengali) using MSCOCO framework. arXiv:2002.06701"},{"key":"198_CR19","doi-asserted-by":"crossref","unstructured":"Sur C (2020) MRRC: multiple role representation crossover interpretation for image captioning with R-CNN feature distribution composition (FDC). arXiv:2002.06436","DOI":"10.1007\/s11042-021-10578-9"},{"key":"198_CR20","doi-asserted-by":"crossref","unstructured":"Sur C (2020) aiTPR: attribute interaction-tensor product representation for image caption. arXiv:2001.09545","DOI":"10.1007\/s11063-021-10438-5"},{"key":"198_CR21","doi-asserted-by":"crossref","unstructured":"Sur C (2019) CRUR: coupled-recurrent unit for unification, conceptualization and context capture for language representation\u2014a generalization of bi directional LSTM. 
arXiv:1911.10132","DOI":"10.1007\/s11042-020-09865-8"},{"issue":"1","key":"198_CR22","doi-asserted-by":"publisher","first-page":"22","DOI":"10.1007\/s42452-019-1765-9","volume":"2","author":"C Sur","year":"2020","unstructured":"Sur C (2020) RBN: enhancement in language attribute prediction using global representation of natural language transfer learning technology like Google BERT. SN Appl Sci 2(1):22","journal-title":"SN Appl Sci"},{"key":"198_CR23","unstructured":"Sur C (2019) Tpsgtr: neural-symbolic tensor product scene-graph-triplet representation for image captioning. arXiv:1911.10115"},{"key":"198_CR24","unstructured":"Sur C (2018) Feature fusion effects of tensor product representation on (de) compositional network for caption generation for images. arXiv:1812.06624"},{"issue":"11","key":"198_CR25","doi-asserted-by":"publisher","first-page":"2483","DOI":"10.1007\/s11517-019-02038-2","volume":"57","author":"C Sur","year":"2019","unstructured":"Sur C (2019) GSIAR: gene-subcategory interaction-based improved deep representation learning for breast cancer subcategorical analysis using gene expression, applicable for precision medicine. Med Biol Eng Comput 57(11):2483\u20132515","journal-title":"Med Biol Eng Comput"},{"issue":"9","key":"198_CR26","doi-asserted-by":"publisher","first-page":"3573","DOI":"10.1007\/s12652-018-1084-9","volume":"10","author":"C Sur","year":"2019","unstructured":"Sur C (2019) DeepSeq: learning browsing log data based personalized security vulnerabilities and counter intelligent measures. J Ambient Intell Humaniz Comput 10(9):3573\u20133602","journal-title":"J Ambient Intell Humaniz Comput"},{"key":"198_CR27","doi-asserted-by":"crossref","unstructured":"Sur C, Liu P, Zhou Y, Wu D (2019) Semantic tensor product for image captioning. In: 2019 5th international conference on big data computing and communications (BIGCOM). IEEE, pp 33\u201337","DOI":"10.1109\/BIGCOM.2019.00013"},{"key":"198_CR28","doi-asserted-by":"crossref","unstructured":"You Q et al (2016) Image captioning with semantic attention. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2016.503"},{"key":"198_CR29","doi-asserted-by":"crossref","unstructured":"Lu J, Xiong C, Parikh D, Socher R (2017) Knowing when to look: adaptive attention via a visual sentinel for image captioning. In: Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), vol 6, p 2","DOI":"10.1109\/CVPR.2017.345"},{"key":"198_CR30","doi-asserted-by":"crossref","unstructured":"Lu D, Whitehead S, Huang L, Ji H, Chang SF (2018) Entity-aware image caption generation. arXiv:1804.07889","DOI":"10.18653\/v1\/D18-1435"},{"key":"198_CR31","doi-asserted-by":"crossref","unstructured":"Lu J, Yang J, Batra D, Parikh D (2018) Neural baby talk. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7219\u20137228","DOI":"10.1109\/CVPR.2018.00754"},{"key":"198_CR32","unstructured":"You Q, Jin H, Luo J (2018) Image captioning at will: a versatile scheme for effectively injecting sentiments into image descriptions. arXiv:1801.10121"},{"key":"198_CR33","unstructured":"Melnyk I, Sercu T, Dognin PL, Ross J, Mroueh Y (2018) Improved image captioning with adversarial semantic alignment. arXiv:1805.00063"},{"key":"198_CR34","unstructured":"Wu J, Hu Z, Mooney RJ (2018) Joint image captioning and question answering. 
arXiv:1805.08389"},{"key":"198_CR35","doi-asserted-by":"crossref","unstructured":"Chen F, Ji R, Su J, Wu Y, Wu Y (2017) Structcap: structured semantic embedding for image captioning. In: Proceedings of the 2017 ACM on multimedia conference. ACM, pp 46\u201354","DOI":"10.1145\/3123266.3123275"},{"key":"198_CR36","doi-asserted-by":"crossref","unstructured":"Jiang W, Ma L, Chen X, Zhang H, Liu W (2018) Learning to guide decoding for image captioning. arXiv:1804.00887","DOI":"10.1609\/aaai.v32i1.12283"},{"key":"198_CR37","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.image.2018.06.002","volume":"67","author":"C Wu","year":"2018","unstructured":"Wu C, Wei Y, Chu X, Su F, Wang L (2018) Modeling visual and word-conditional semantic attention for image captioning. Signal Process Image Commun 67:100\u2013107","journal-title":"Signal Process Image Commun"},{"key":"198_CR38","first-page":"1","volume":"99","author":"K Fu","year":"2018","unstructured":"Fu K, Li J, Jin J, Zhang C (2018) Image-text surgery: efficient concept learning in image captioning by generating pseudopairs. IEEE Trans Neural Netw Learn Syst 99:1\u201312","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"2","key":"198_CR39","first-page":"48","volume":"14","author":"M Cornia","year":"2018","unstructured":"Cornia M, Baraldi L, Serra G, Cucchiara R (2018) Paying more attention to saliency: image captioning with saliency and context attention. ACM Trans Multimed Comput Commun Appl (TOMM) 14(2):48","journal-title":"ACM Trans Multimed Comput Commun Appl (TOMM)"},{"key":"198_CR40","doi-asserted-by":"crossref","unstructured":"Zhao W, Wang B, Ye J, Yang M, Zhao Z, Luo R, Qiao Y (2018) A Multi-task learning approach for image captioning. In: IJCAI, pp 1205\u20131211","DOI":"10.24963\/ijcai.2018\/168"},{"key":"198_CR41","doi-asserted-by":"crossref","unstructured":"Li X, Wang X, Xu C, Lan W, Wei Q, Yang G, Xu J (2018) COCO-CN for cross-lingual image tagging, captioning and retrieval. arXiv:1805.08661","DOI":"10.1109\/TMM.2019.2896494"},{"key":"198_CR42","doi-asserted-by":"crossref","unstructured":"Chen M, Ding G, Zhao S, Chen H, Liu Q, Han J (2017) Reference based LSTM for image captioning. In: AAAI, pp 3981\u20133987","DOI":"10.1609\/aaai.v31i1.11198"},{"key":"198_CR43","unstructured":"Chen H, Zhang H, Chen PY, Yi J, Hsieh CJ (2017) Show-and-fool: Crafting adversarial examples for neural image captioning. arXiv:1712.02051"},{"issue":"11","key":"198_CR44","doi-asserted-by":"publisher","first-page":"5514","DOI":"10.1109\/TIP.2018.2855406","volume":"27","author":"S Ye","year":"2018","unstructured":"Ye S, Liu N, Han J (2018) Attentive linear transformation for image captioning. IEEE Trans Image Process 27(11):5514\u20135524","journal-title":"IEEE Trans Image Process"},{"key":"198_CR45","doi-asserted-by":"crossref","unstructured":"Wang Y, Lin Z, Shen X, Cohen S, Cottrell GW (2017) Skeleton key: Image captioning by skeleton-attribute decomposition. arXiv:1704.06972","DOI":"10.1109\/CVPR.2017.780"},{"key":"198_CR46","doi-asserted-by":"crossref","unstructured":"Chen T, Zhang Z, You Q, Fang C, Wang Z, Jin H, Luo J (2018) \u201cFactual\u201d or \u201cEmotional\u201d: stylized image captioning with adaptive learning and attention. arXiv:1807.03871","DOI":"10.1007\/978-3-030-01249-6_32"},{"key":"198_CR47","doi-asserted-by":"crossref","unstructured":"Chen F, Ji R, Sun X, Wu Y, Su J (2018) GroupCap: group-based image captioning with structured relevance and diversity constraints. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1345\u20131353","DOI":"10.1109\/CVPR.2018.00146"},{"key":"198_CR48","doi-asserted-by":"crossref","unstructured":"Liu C, Sun F, Wang C, Wang F, Yuille A (2017) MAT: a multimodal attentive translator for image captioning. arXiv:1702.05658","DOI":"10.24963\/ijcai.2017\/563"},{"key":"198_CR49","doi-asserted-by":"crossref","unstructured":"Harzig P, Brehm S, Lienhart R, Kaiser C, Schallner R (2018) Multimodal image captioning for marketing analysis. arXiv:1802.01958","DOI":"10.1109\/MIPR.2018.00035"},{"key":"198_CR50","doi-asserted-by":"crossref","unstructured":"Liu X, Li H, Shao J, Chen D, Wang X (2018) Show, tell and discriminate: image captioning by self-retrieval with partially labeled data. arXiv:1803.08314","DOI":"10.1007\/978-3-030-01267-0_21"},{"key":"198_CR51","unstructured":"Chunseong Park C, Kim B, Kim G (2017) Attend to you: personalized image captioning with context sequence memory networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 895\u2013903"},{"key":"198_CR52","doi-asserted-by":"crossref","unstructured":"Sharma P, Ding N, Goodman S, Soricut R (2018) Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning. In: Proceedings of the 56th annual meeting of the association for computational linguistics (volume 1: Long Papers), vol 1, pp 2556\u20132565","DOI":"10.18653\/v1\/P18-1238"},{"key":"198_CR53","doi-asserted-by":"crossref","unstructured":"Yao T, Pan Y, Li Y, Mei T (2017) Incorporating copying mechanism in image captioning for learning novel objects. In: 2017 IEEE Conference on computer vision and pattern recognition (CVPR). IEEE, pp 5263\u20135271","DOI":"10.1109\/CVPR.2017.559"},{"key":"198_CR54","unstructured":"Zhang L, Sung F, Liu F, Xiang T, Gong S, Yang Y, Hospedales TM (2017) Actor-critic sequence training for image captioning. arXiv:1706.09601"},{"issue":"12","key":"198_CR55","doi-asserted-by":"publisher","first-page":"2321","DOI":"10.1109\/TPAMI.2016.2642953","volume":"39","author":"K Fu","year":"2017","unstructured":"Fu K, Jin J, Cui R, Sha F, Zhang C (2017) Aligning where to see and what to tell: image captioning with region-based attention and scene-specific contexts. IEEE Trans Pattern Anal Mach Intell 39(12):2321\u20132334","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"198_CR56","doi-asserted-by":"crossref","unstructured":"Ren Z, Wang X, Zhang N, Lv X, Li LJ (2017) Deep reinforcement learning-based image captioning with embedding reward. arXiv:1704.03899","DOI":"10.1109\/CVPR.2017.128"},{"key":"198_CR57","doi-asserted-by":"crossref","unstructured":"Liu S, Zhu Z, Ye N, Guadarrama S, Murphy K (2017) Improved image captioning via policy gradient optimization of spider. In: Proceedings of the IEEE international conference on computer vision, vol 3, p 3","DOI":"10.1109\/ICCV.2017.100"},{"key":"198_CR58","doi-asserted-by":"crossref","unstructured":"Cohn-Gordon R, Goodman N, Potts C (2018) Pragmatically informative image captioning with character-level reference. arXiv:1804.05417","DOI":"10.18653\/v1\/N18-2070"},{"key":"198_CR59","doi-asserted-by":"crossref","unstructured":"Liu C, Mao J, Sha F, Yuille AL (2017) Attention correctness in neural image captioning. 
In: AAAI, pp 4176\u20134182","DOI":"10.1609\/aaai.v31i1.11197"},{"issue":"4","key":"198_CR60","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2016.2587640","volume":"39","author":"O Vinyals","year":"2017","unstructured":"Vinyals O, Toshev A, Bengio S, Erhan D (2017) Show and tell: Lessons learned from the 2015 mscoco image captioning challenge. IEEE Trans Pattern Anal Mach Intell 39(4):652\u2013663","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"198_CR61","doi-asserted-by":"crossref","unstructured":"Zhang M, Yang Y, Zhang H, Ji Y, Shen HT, Chua TS (2018) More is better: precise and detailed image captioning using online positive recall and missing concepts mining. IEEE Trans Image Process","DOI":"10.1109\/TIP.2018.2855415"},{"issue":"4","key":"198_CR62","doi-asserted-by":"publisher","first-page":"999","DOI":"10.1109\/TPAMI.2018.2824816","volume":"41","author":"CC Park","year":"2018","unstructured":"Park CC, Kim B, Kim G (2018) Towards personalized image captioning via multimodal memory networks. IEEE Trans Pattern Anal Mach Intell 41(4):999\u20131012","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"198_CR63","doi-asserted-by":"crossref","unstructured":"Wu Q, Shen C, Wang P, Dick A, van den Hengel A (2017) Image captioning and visual question answering based on attributes and external knowledge. IEEE Trans Pattern Anal Mach Intell","DOI":"10.1109\/TPAMI.2017.2708709"},{"key":"198_CR64","doi-asserted-by":"crossref","unstructured":"Gan C et al (2017) Stylenet: generating attractive visual captions with styles. In: CVPR","DOI":"10.1109\/CVPR.2017.108"},{"key":"198_CR65","unstructured":"Jin J et al (2015) Aligning where to see and what to tell: image caption with region-based attention and scene factorization. arXiv:1506.06272"},{"key":"198_CR66","unstructured":"Kiros R, Salakhutdinov R, Zemel RS (2014) Unifying visual-semantic embeddings with multimodal neural language models. arXiv:1411.2539"},{"key":"198_CR67","unstructured":"Pu Y et al (2016) Variational autoencoder for deep learning of images, labels and captions. In: Advances in neural information processing systems"},{"key":"198_CR68","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1162\/tacl_a_00177","volume":"2","author":"R Socher","year":"2014","unstructured":"Socher R et al (2014) Grounded compositional semantics for finding and describing images with sentences. Trans Assoc Comput Linguist 2:207\u2013218","journal-title":"Trans Assoc Comput Linguist"},{"key":"198_CR69","unstructured":"Sutskever I, Martens J, Hinton GE (2011) Generating text with recurrent neural networks. In: Proceedings of the 28th International conference on machine learning (ICML-11)"},{"key":"198_CR70","unstructured":"Sutskever I, Vinyals O, Le QV (2014) Sequence to sequence learning with neural networks. In: Advances in neural information processing systems"},{"key":"198_CR71","doi-asserted-by":"crossref","unstructured":"LTran D et al (2015) Learning spatiotemporal features with 3d convolutional networks. In: Proceedings of the IEEE international conference on computer vision","DOI":"10.1109\/ICCV.2015.510"},{"key":"198_CR72","doi-asserted-by":"crossref","unstructured":"Tran K et al (2016) Rich image captioning in the wild. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops","DOI":"10.1109\/CVPRW.2016.61"},{"key":"198_CR73","doi-asserted-by":"crossref","unstructured":"Girshick R et al (2014) Rich feature hierarchies for accurate object detection and semantic segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2014.81"},{"key":"198_CR74","doi-asserted-by":"crossref","unstructured":"Jia X et al (2015) Guiding the long-short term memory model for image caption generation. In: Proceedings of the IEEE International Conference on Computer Vision","DOI":"10.1109\/ICCV.2015.277"},{"issue":"12","key":"198_CR75","doi-asserted-by":"publisher","first-page":"2891","DOI":"10.1109\/TPAMI.2012.162","volume":"35","author":"G Kulkarni","year":"2013","unstructured":"Kulkarni G et al (2013) Babytalk: understanding and generating simple image descriptions. IEEE Trans Pattern Anal Mach Intell 35(12):2891\u20132903","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"10","key":"198_CR76","doi-asserted-by":"publisher","first-page":"351","DOI":"10.1162\/tacl_a_00188","volume":"2","author":"P Kuznetsova","year":"2014","unstructured":"Kuznetsova P et al (2014) TREETALK: composition and compression of trees for image descriptions. TACL 2(10):351\u2013362","journal-title":"TACL"},{"key":"198_CR77","doi-asserted-by":"crossref","unstructured":"Mao J et al (2015) Learning like a child: fast novel visual concept learning from sentence descriptions of images. In: Proceedings of the IEEE international conference on computer vision","DOI":"10.1109\/ICCV.2015.291"},{"key":"198_CR78","doi-asserted-by":"crossref","unstructured":"Mathews AP, Xie L, He X (2016) SentiCap: generating image descriptions with sentiments. In: AAAI","DOI":"10.1609\/aaai.v30i1.10475"},{"key":"198_CR79","unstructured":"Yang Y et al (2011) Corpus-guided sentence generation of natural images. In: Proceedings of the Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics"},{"key":"198_CR80","doi-asserted-by":"crossref","unstructured":"Donahue J et al (2015) Long-term recurrent convolutional networks for visual recognition and description. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"198_CR81","doi-asserted-by":"crossref","unstructured":"Fang H et al (2015) From captions to visual concepts and back. In: Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298754"},{"issue":"2s","key":"198_CR82","first-page":"40","volume":"14","author":"C Wang","year":"2018","unstructured":"Wang C, Yang H, Meinel C (2018) Image captioning with deep bidirectional LSTMs and multi-task learning. ACM Trans Multimed Comput Commun Appl (TOMM) 14(2s):40","journal-title":"ACM Trans Multimed Comput Commun Appl (TOMM)"},{"key":"198_CR83","unstructured":"Kiros R, Salakhutdinov R, Zemel R (2014) Multimodal neural language models. In: International conference on machine learning, pp 595\u2013603"},{"key":"198_CR84","unstructured":"Yang Z, Yuan Y, Wu Y, Salakhutdinov R, Cohen WW (2016) Encode, review, and decode: reviewer module for caption generation. 
arXiv:1605.07912"},{"issue":"4","key":"198_CR85","doi-asserted-by":"publisher","first-page":"689","DOI":"10.1007\/s12065-019-00278-7","volume":"12","author":"C Sur","year":"2019","unstructured":"Sur C (2019) UCRLF: unified constrained reinforcement learning framework for phase-aware architectures for autonomous vehicle signaling and trajectory optimization. Evol Intel 12(4):689\u2013712","journal-title":"Evol Intel"}],"container-title":["International Journal of Multimedia Information Retrieval"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13735-020-00198-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13735-020-00198-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13735-020-00198-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,24]],"date-time":"2022-11-24T21:02:34Z","timestamp":1669323754000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13735-020-00198-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,26]]},"references-count":85,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2020,12]]}},"alternative-id":["198"],"URL":"https:\/\/doi.org\/10.1007\/s13735-020-00198-8","relation":{},"ISSN":["2192-6611","2192-662X"],"issn-type":[{"type":"print","value":"2192-6611"},{"type":"electronic","value":"2192-662X"}],"subject":[],"published":{"date-parts":[[2020,10,26]]},"assertion":[{"value":"7 April 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 September 2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 October 2020","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 October 2020","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}