{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T16:40:22Z","timestamp":1774975222545,"version":"3.50.1"},"reference-count":85,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2021,2,18]],"date-time":"2021-02-18T00:00:00Z","timestamp":1613606400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,2,18]],"date-time":"2021-02-18T00:00:00Z","timestamp":1613606400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2021,5]]},"DOI":"10.1007\/s11042-021-10578-9","type":"journal-article","created":{"date-parts":[[2021,2,19]],"date-time":"2021-02-19T18:49:03Z","timestamp":1613760543000},"page":"18413-18443","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":18,"title":["MRRC: multiple role representation crossover interpretation for image captioning with R-CNN feature distribution composition (FDC)"],"prefix":"10.1007","volume":"80","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1563-9304","authenticated-orcid":false,"given":"Chiranjib","family":"Sur","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,2,18]]},"reference":[{"key":"10578_CR1","doi-asserted-by":"crossref","unstructured":"Anderson P, He X, Buehler C, Teney D, Johnson M, Gould S, Zhang L (2018) Bottom-up and top-down attention for image captioning and visual question answering. In: CVPR, vol 3, no 5, p 6","DOI":"10.1109\/CVPR.2018.00636"},{"key":"10578_CR2","doi-asserted-by":"crossref","unstructured":"Chen H, Ding G, Lin Z, Zhao S, Han J (2018) Show, observe and tell: Attribute-driven attention model for image captioning. In: IJCAI, pp. 606\u2013612","DOI":"10.24963\/ijcai.2018\/84"},{"key":"10578_CR3","doi-asserted-by":"crossref","unstructured":"Chen M, Ding G, Zhao S, Chen H, Liu Q, Han J (2017) Reference based LSTM for image captioning. In: AAAI, pp 3981\u20133987","DOI":"10.1609\/aaai.v31i1.11198"},{"key":"10578_CR4","doi-asserted-by":"crossref","unstructured":"Chen F, Ji R, Su J, Wu Y, Wu Y (2017) Structcap: Structured semantic embedding for image captioning. In: Proceedings of the 2017 ACM on Multimedia Conference (pp. 46\u201354). ACM","DOI":"10.1145\/3123266.3123275"},{"key":"10578_CR5","doi-asserted-by":"crossref","unstructured":"Chen F, Ji R, Sun X, Wu Y, Su J (2018) Groupcap: Group-based image captioning with structured relevance and diversity constraints. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1345\u20131353","DOI":"10.1109\/CVPR.2018.00146"},{"key":"10578_CR6","doi-asserted-by":"crossref","unstructured":"Chen Xinlei, Lawrence Zitnick C (2015) \u201cMind\u2019s eye: A recurrent visual representation for image caption generation.\u201d Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298856"},{"key":"10578_CR7","unstructured":"Chen H, Zhang H, Chen PY, Yi J, Hsieh CJ (2017) Show-and-fool: Crafting adversarial examples for neural image captioning. 
arXiv:1712.02051"},{"key":"10578_CR8","doi-asserted-by":"crossref","unstructured":"Chen T, Zhang Z, You Q, Fang C, Wang Z, Jin H, Luo J (2018) \u201cFactual\u201d or \u201cEmotional\u201d: Stylized image captioning with adaptive learning and attention. arXiv:1807.03871","DOI":"10.1007\/978-3-030-01249-6_32"},{"key":"10578_CR9","unstructured":"Chunseong Park C, Kim B, Kim G (2017) Attend to you: Personalized image captioning with context sequence memory networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 895\u2013903"},{"key":"10578_CR10","doi-asserted-by":"crossref","unstructured":"Cohn-Gordon R, Goodman N, Potts C (2018) Pragmatically informative image captioning with character-level reference. arXiv:1804.05417","DOI":"10.18653\/v1\/N18-2070"},{"issue":"2","key":"10578_CR11","first-page":"48","volume":"14","author":"M Cornia","year":"2018","unstructured":"Cornia M, Baraldi L, Serra G, Cucchiara R (2018) Paying more attention to saliency: Image captioning with saliency and context attention. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM) 14(2):48","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"10578_CR12","unstructured":"Devlin J, Gupta S, Girshick R, Mitchell M, Zitnick CL (2015) Exploring nearest neighbor approaches for image captioning. arXiv preprint arXiv:1505.04467"},{"key":"10578_CR13","doi-asserted-by":"crossref","unstructured":"Devlin J, et al. (2015) \u201cLanguage models for image captioning: The quirks and what works\u201d. arXiv:1505.01809","DOI":"10.3115\/v1\/P15-2017"},{"key":"10578_CR14","doi-asserted-by":"crossref","unstructured":"Donahue J, et al. (2015) \u201cLong-term recurrent convolutional networks for visual recognition and description.\u201d Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"10578_CR15","doi-asserted-by":"crossref","unstructured":"Fang Hao, et al. (2015) \u201cFrom captions to visual concepts and back.\u201d Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298754"},{"issue":"12","key":"10578_CR16","doi-asserted-by":"publisher","first-page":"2321","DOI":"10.1109\/TPAMI.2016.2642953","volume":"39","author":"K Fu","year":"2017","unstructured":"Fu K, Jin J, Cui R, Sha F, Zhang C (2017) Aligning where to see and what to tell: Image captioning with region-based attention and scene-specific contexts. IEEE transactions on pattern analysis and machine intelligence 39(12):2321\u20132334","journal-title":"IEEE transactions on pattern analysis and machine intelligence"},{"key":"10578_CR17","doi-asserted-by":"crossref","unstructured":"Fu K, Li J, Jin J, Zhang C (2018) Image-text surgery: efficient concept learning in image captioning by generating Pseudopairs. IEEE Transactions on Neural Networks and Learning Systems, (99), pp 1\u201312","DOI":"10.1109\/TNNLS.2018.2813306"},{"key":"10578_CR18","doi-asserted-by":"crossref","unstructured":"Gan Z, et al. (2016) \u201cSemantic compositional networks for visual captioning\u201d. arXiv:1611.08002","DOI":"10.1109\/CVPR.2017.127"},{"key":"10578_CR19","doi-asserted-by":"crossref","unstructured":"Gan C, et al. (2017) \u201cStylenet: Generating attractive visual captions with styles.\u201d CVPR","DOI":"10.1109\/CVPR.2017.108"},{"key":"10578_CR20","doi-asserted-by":"crossref","unstructured":"Girshick R, et al. 
(2014) \u201cRich feature hierarchies for accurate object detection and semantic segmentation.\u201d Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2014.81"},{"key":"10578_CR21","doi-asserted-by":"crossref","unstructured":"Harzig P, Brehm S, Lienhart R, Kaiser C, Schallner R (2018) Multimodal image captioning for marketing analysis. arXiv:1802.01958","DOI":"10.1109\/MIPR.2018.00035"},{"key":"10578_CR22","doi-asserted-by":"crossref","unstructured":"Jia X, et al. (2015) \u201cGuiding the long-short term memory model for image caption generation.\u201d Proceedings of the IEEE International Conference on Computer Vision","DOI":"10.1109\/ICCV.2015.277"},{"key":"10578_CR23","doi-asserted-by":"crossref","unstructured":"Jiang W, Ma L, Chen X, Zhang H, Liu W (2018) Learning to guide decoding for image captioning. arXiv:1804.00887","DOI":"10.1609\/aaai.v32i1.12283"},{"key":"10578_CR24","unstructured":"Jin J, et al. (2015) \u201cAligning where to see and what to tell: image caption with region-based attention and scene factorization. arXiv:1506.06272"},{"key":"10578_CR25","doi-asserted-by":"crossref","unstructured":"Karpathy A, Li F-F (2015) \u201cDeep visual-semantic alignments for generating image descriptions.\u201d Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"10578_CR26","unstructured":"Kiros R, Salakhutdinov R, Zemel R (2014) Multimodal neural language models. In: International conference on machine learning, pp 595\u2013603"},{"key":"10578_CR27","unstructured":"Kiros R, Salakhutdinov R, Zemel RS (2014) \u201cUnifying visual-semantic embeddings with multimodal neural language models\u201d. arXiv:1411.2539"},{"issue":"12","key":"10578_CR28","doi-asserted-by":"publisher","first-page":"2891","DOI":"10.1109\/TPAMI.2012.162","volume":"35","author":"G Kulkarni","year":"2013","unstructured":"Kulkarni G, et al. (2013) \u201cBabytalk: Understanding and generating simple image descriptions\u201d. IEEE Transactions on Pattern Analysis and Machine Intelligence 35(12):2891\u20132903","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"10","key":"10578_CR29","doi-asserted-by":"publisher","first-page":"351","DOI":"10.1162\/tacl_a_00188","volume":"2","author":"P Kuznetsova","year":"2014","unstructured":"Kuznetsova P, et al. (2014) \u201cTREETALK: Composition and Compression of Trees for Image Descriptions\u201d. TACL 2(10):351\u2013362","journal-title":"TACL"},{"key":"10578_CR30","doi-asserted-by":"crossref","unstructured":"LTran D, et al. (2015) \u201cLearning spatiotemporal features with 3d convolutional networks.\u201d Proceedings of the IEEE international conference on computer vision","DOI":"10.1109\/ICCV.2015.510"},{"key":"10578_CR31","doi-asserted-by":"crossref","unstructured":"Li X, Wang X, Xu C, Lan W, Wei Q, Yang G, Xu J (2018) COCO-CN for Cross-Lingual Image Tagging, Captioning and Retrieval. In: arXiv:1805.08661","DOI":"10.1109\/TMM.2019.2896494"},{"key":"10578_CR32","doi-asserted-by":"crossref","unstructured":"Liu X, Li H, Shao J, Chen D, Wang X (2018) Show, tell and discriminate: Image captioning by self-retrieval with partially labeled data. arXiv:1803.08314","DOI":"10.1007\/978-3-030-01267-0_21"},{"key":"10578_CR33","doi-asserted-by":"crossref","unstructured":"Liu C, Mao J, Sha F, Yuille AL (2017) Attention correctness in neural image captioning. 
In: AAAI, pp 4176\u20134182","DOI":"10.1609\/aaai.v31i1.11197"},{"key":"10578_CR34","doi-asserted-by":"crossref","unstructured":"Liu C, Sun F, Wang C, Wang F, Yuille A (2017) MAT: A multimodal attentive translator for image captioning. arXiv:1702.05658","DOI":"10.24963\/ijcai.2017\/563"},{"key":"10578_CR35","doi-asserted-by":"crossref","unstructured":"Liu S, Zhu Z, Ye N, Guadarrama S, Murphy K (2017) Improved image captioning via policy gradient optimization of spider. In: Proc. IEEE Int. Conf. Comp. Vis, vol 3, p 3","DOI":"10.1109\/ICCV.2017.100"},{"key":"10578_CR36","doi-asserted-by":"crossref","unstructured":"Lu D, Whitehead S, Huang L, Ji H, Chang SF (2018) Entity-aware Image Caption Generation. arXiv:1804.07889","DOI":"10.18653\/v1\/D18-1435"},{"key":"10578_CR37","doi-asserted-by":"crossref","unstructured":"Lu J, Xiong C, Socher R, Parikh D (2017) Knowing when to look: Adaptive attention via a visual sentinel for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), vol 6, p 2","DOI":"10.1109\/CVPR.2017.345"},{"key":"10578_CR38","doi-asserted-by":"crossref","unstructured":"Lu J, Yang J, Batra D, Parikh D (2018) Neural baby talk. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 7219\u20137228","DOI":"10.1109\/CVPR.2018.00754"},{"key":"10578_CR39","unstructured":"Mao J, et al. (2014) \u201cDeep captioning with multimodal recurrent neural networks (m-rnn)\u201d. arXiv:1412.6632"},{"key":"10578_CR40","doi-asserted-by":"crossref","unstructured":"Mao J, et al. (2015) \u201cLearning like a child: Fast novel visual concept learning from sentence descriptions of images.\u201d Proceedings of the IEEE International Conference on Computer Vision","DOI":"10.1109\/ICCV.2015.291"},{"key":"10578_CR41","doi-asserted-by":"crossref","unstructured":"Mathews AP, Xie L, He X (2016) \u201cSenticap: Generating Image Descriptions with Sentiments.\u201d AAAI","DOI":"10.1609\/aaai.v30i1.10475"},{"key":"10578_CR42","unstructured":"Melnyk I, Sercu T, Dognin PL, Ross J, Mroueh Y (2018) Improved image captioning with adversarial semantic alignment. arXiv:1805.00063"},{"key":"10578_CR43","doi-asserted-by":"crossref","unstructured":"Park CC, Kim G (2018) Towards personalized image captioning via Multimodal memory networks. IEEE Transactions on Pattern Analysis and Machine Intelligence","DOI":"10.1109\/TPAMI.2018.2824816"},{"key":"10578_CR44","unstructured":"Pu Yunchen, et al. (2016) \u201cVariational autoencoder for deep learning of images, labels and captions.\u201d Advances in Neural Information Processing Systems"},{"key":"10578_CR45","unstructured":"Ren S, He K, Girshick R, Sun J (2015) Faster r-cnn: Towards real-time object detection with region proposal networks. In: Advances in neural information processing systems, pp 91\u201399"},{"key":"10578_CR46","doi-asserted-by":"crossref","unstructured":"Ren Z, Wang X, Zhang N, Lv X, Li LJ (2017) Deep reinforcement learning-based image captioning with embedding reward. arXiv:1704.03899","DOI":"10.1109\/CVPR.2017.128"},{"key":"10578_CR47","doi-asserted-by":"crossref","unstructured":"Rennie SJ, Marcheret E, Mroueh Y, Ross J, Goel V (2017) Self-critical sequence training for image captioning. In: CVPR, vol 1, no 2, p 3","DOI":"10.1109\/CVPR.2017.131"},{"key":"10578_CR48","doi-asserted-by":"crossref","unstructured":"Sharma P, Ding N, Goodman S, Soricut R (2018) Conceptual Captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. 
In: Proceedings of the 56th annual meeting of the association for computational linguistics (Volume 1: Long Papers), vol 1, pp 2556\u20132565","DOI":"10.18653\/v1\/P18-1238"},{"key":"10578_CR49","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1162\/tacl_a_00177","volume":"2","author":"Richard Socher","year":"2014","unstructured":"Socher Richard, et al. (2014) Grounded compositional semantics for finding and describing images with sentences. Transactions of the Association for Computational Linguistics 2:207\u2013218","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"10578_CR50","unstructured":"Sur C (2018) \u201cFeature fusion effects of tensor product representation on (De) compositional network for caption generation for images\u201d. arXiv:1812.06624"},{"key":"10578_CR51","unstructured":"Sur C (2018) Representation for Language Understanding. Gainesville: University of Florida, pp. 1\u201390. Available at: https:\/\/drive.google.com\/file\/d\/15Fhmt5aM_b0J5jtE9mdWInQPfDS3TqVw"},{"key":"10578_CR52","doi-asserted-by":"crossref","unstructured":"Sur C (2019) Survey of deep learning and architectures for visual captioning\u2014transitioning between media and natural languages. Multimedia Tools and Applications, pp 1\u201351","DOI":"10.1007\/s11042-019-08021-1"},{"issue":"4","key":"10578_CR53","doi-asserted-by":"publisher","first-page":"689","DOI":"10.1007\/s12065-019-00278-7","volume":"12","author":"C Sur","year":"2019","unstructured":"Sur C (2019) \u201cUCRLF: unified constrained reinforcement learning framework for phase-aware architectures for autonomous vehicle signaling and trajectory optimization\u201d. Evol Intel 12(4):689\u2013712","journal-title":"Evol Intel"},{"key":"10578_CR54","doi-asserted-by":"crossref","unstructured":"Sur C (2019) \u201cCRUR: Coupled-recurrent unit for unification. Conceptualization and context capture for language representation\u2013a generalization of BI directional LSTM\u201d. arXiv:1911.10132","DOI":"10.1007\/s11042-020-09865-8"},{"key":"10578_CR55","unstructured":"Sur C (2019) \u201cTpsgtr: Neural-symbolic tensor product scene-graph-triplet representation for image captioning\u201d. arXiv:1911.10115"},{"key":"10578_CR56","unstructured":"Sur C (2020) \u201cSACT:, self-aware multi-space feature composition transformer for multinomial attention for video captioning\u201d. arXiv:2006.14262"},{"key":"10578_CR57","unstructured":"Sur C (2020) \u201cSelf-segregating and coordinated-segregating transformer for focused deep multi-modular network for visual question answering\u201d. arXiv:2006.14264"},{"key":"10578_CR58","unstructured":"Sur C (2020) \u201cReLGAN: generalization of consistency for GAN with disjoint constraints and relative learning of generative processes for multiple transformation learning\u201d. arXiv:2006.07809"},{"key":"10578_CR59","doi-asserted-by":"publisher","unstructured":"Sur C (2020) AACR: Feature fusion effects of algebraic amalgamation composed representation on (De)Compositional network for caption generation for images, vol 1. https:\/\/doi.org\/10.1007\/s42979-020-00238-4","DOI":"10.1007\/s42979-020-00238-4"},{"key":"10578_CR60","doi-asserted-by":"publisher","first-page":"228","DOI":"10.1007\/s42979-020-00234-8","volume":"1","author":"C Sur","year":"2020","unstructured":"Sur C (2020) GenAtSeq GAN with heuristic reforms for knowledge centric network with browsing characteristics learning, individual tracking and malware detection with website2Vec. SN COMPUT. SCI. 1:228. 
https:\/\/doi.org\/10.1007\/s42979-020-00234-8","journal-title":"SN COMPUT. SCI."},{"key":"10578_CR61","unstructured":"Sur C (2020) \u201cGaussian smoothen semantic features (GSSF)\u2013exploring the linguistic aspects of visual captioning in indian languages (Bengali) using MSCOCO framework\u201d. arXiv:2002.06701"},{"key":"10578_CR62","doi-asserted-by":"crossref","unstructured":"Sur C (2020) \u201caiTPR: attribute interaction-tensor product representation for image caption\u201d. arXiv:2001.09545","DOI":"10.1007\/s11063-021-10438-5"},{"issue":"1","key":"10578_CR63","doi-asserted-by":"publisher","first-page":"22","DOI":"10.1007\/s42452-019-1765-9","volume":"2","author":"C Sur","year":"2020","unstructured":"Sur C (2020) \u201cRBN: enhancement in language attribute prediction using global representation of natural language transfer learning technology like Google BERT\u201d. SN Applied Sciences 2(1):22","journal-title":"SN Applied Sciences"},{"key":"10578_CR64","doi-asserted-by":"crossref","unstructured":"Sur C, Liu P, Zhou Y, Wu D (2019) \u201cSemantic Tensor Product for Image Captioning\u201d. In: 2019 5th international conference on big data computing and communications (BIGCOM), pp 33\u201337. IEEE","DOI":"10.1109\/BIGCOM.2019.00013"},{"key":"10578_CR65","unstructured":"Sutskever I, Martens J, Hinton GE (2011) \u201cGenerating text with recurrent neural networks.\u201d Proceedings of the 28th International Conference on Machine Learning (ICML-11)"},{"key":"10578_CR66","unstructured":"Sutskever I, Vinyals O, Le QV (2014) \u201cSequence to sequence learning with neural networks.\u201d Advances in neural information processing systems"},{"key":"10578_CR67","doi-asserted-by":"crossref","unstructured":"Tran K, et al. (2016) \u201cRich image captioning in the wild.\u201d Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops","DOI":"10.1109\/CVPRW.2016.61"},{"issue":"4","key":"10578_CR68","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2016.2587640","volume":"39","author":"O Vinyals","year":"2017","unstructured":"Vinyals O, Toshev A, Bengio S, Erhan D (2017) Show and tell: Lessons learned from the 2015 mscoco image captioning challenge. IEEE transactions on pattern analysis and machine intelligence 39(4):652\u2013663","journal-title":"IEEE transactions on pattern analysis and machine intelligence"},{"key":"10578_CR69","doi-asserted-by":"crossref","unstructured":"Vinyals Oriol, et al. (2015) \u201cShow and tell: A neural image caption generator.\u201d Proceedings of the IEEE conference on computer vision and pattern recognition","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"10578_CR70","doi-asserted-by":"crossref","unstructured":"Wang Y, Lin Z, Shen X, Cohen S, Cottrell GW (2017) Skeleton key: Image captioning by skeleton-attribute decomposition. arXiv:1704.06972","DOI":"10.1109\/CVPR.2017.780"},{"key":"10578_CR71","first-page":"40","volume":"14.2s","author":"C Wang","year":"2018","unstructured":"Wang C, Yang H, Meinel C (2018) Image captioning with deep bidirectional LSTMs and multi-task learning. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM) 14.2s:40","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"10578_CR72","unstructured":"Wu J, Hu Z, Mooney RJ (2018) Joint image captioning and question answering. 
arXiv:1805.08389"},{"key":"10578_CR73","doi-asserted-by":"crossref","unstructured":"Wu Q, Shen C, Wang P, Dick A, van den Hengel A (2017) Image captioning and visual question answering based on attributes and external knowledge. IEEE transactions on pattern analysis and machine intelligence","DOI":"10.1109\/TPAMI.2017.2708709"},{"key":"10578_CR74","doi-asserted-by":"crossref","unstructured":"Wu C, Wei Y, Chu X, Su F, Wang L (2018) Modeling visual and word-conditional semantic attention for image captioning. Signal Processing: Image Communication","DOI":"10.1016\/j.image.2018.06.002"},{"key":"10578_CR75","unstructured":"Xu Kelvin, et al. (2015) \u201cShow, attend and tell: Neural image caption generation with visual attention.\u201d International conference on machine learning"},{"key":"10578_CR76","unstructured":"Yang Z, Yuan Y, Wu Y, Salakhutdinov R, Cohen WW (2016) Encode, review, and decode: Reviewer module for caption generation. arXiv. arXiv:1605.07912"},{"key":"10578_CR77","unstructured":"Yang Y, et al. (2011) \u201cCorpus-guided sentence generation of natural images.\u201d Proceedings of the Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics"},{"key":"10578_CR78","doi-asserted-by":"crossref","unstructured":"Yao T, Pan Y, Li Y, Mei T (2017) Incorporating copying mechanism in image captioning for learning novel objects. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 5263\u20135271). IEEE","DOI":"10.1109\/CVPR.2017.559"},{"key":"10578_CR79","doi-asserted-by":"crossref","unstructured":"Yao T, Pan Y, Li Y, Qiu Z, Mei T (2017) Boosting image captioning with attributes. In: IEEE International conference on computer vision, ICCV, pp 22\u201329","DOI":"10.1109\/ICCV.2017.524"},{"key":"10578_CR80","doi-asserted-by":"crossref","unstructured":"Ye S, Han J (2018) Attentive linear transformation for image captioning. IEEE Transactions on Image Processing","DOI":"10.1109\/TIP.2018.2855406"},{"key":"10578_CR81","unstructured":"You Q, Jin H, Luo J (2018) Image captioning at will: A versatile scheme for effectively injecting sentiments into image descriptions. arXiv:1801.10121"},{"key":"10578_CR82","doi-asserted-by":"crossref","unstructured":"You Q, et al. (2016) \u201cImage captioning with semantic attention.\u201d Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","DOI":"10.1109\/CVPR.2016.503"},{"key":"10578_CR83","unstructured":"Zhang L, Sung F, Liu F, Xiang T, Gong S, Yang Y, Hospedales TM (2017) Actor-critic sequence training for image captioning. arXiv:1706.09601"},{"key":"10578_CR84","doi-asserted-by":"crossref","unstructured":"Zhang M, Yang Y, Zhang H, Ji Y, Shen HT, Chua TS (2018) More is Better: precise and detailed image captioning using online positive recall and missing concepts mining. IEEE Transactions on Image Processing","DOI":"10.1109\/TIP.2018.2855415"},{"key":"10578_CR85","doi-asserted-by":"crossref","unstructured":"Zhao W, Wang B, Ye J, Yang M, Zhao Z, Luo R, Qiao Y (2018) A multi-task learning approach for image captioning. In: IJCAI, pp. 
1205\u20131211","DOI":"10.24963\/ijcai.2018\/168"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-10578-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-021-10578-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-10578-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,18]],"date-time":"2022-12-18T06:57:20Z","timestamp":1671346640000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-021-10578-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,2,18]]},"references-count":85,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2021,5]]}},"alternative-id":["10578"],"URL":"https:\/\/doi.org\/10.1007\/s11042-021-10578-9","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"value":"1380-7501","type":"print"},{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,2,18]]},"assertion":[{"value":"11 April 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 September 2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 January 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 February 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}
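The record above is the JSON envelope returned by the public Crossref REST API (GET https://api.crossref.org/works/{DOI}), with the bibliographic fields under "message". As a minimal sketch of how such a record can be retrieved and reduced to a one-line citation, the following Python uses only the standard library; the helper names fetch_work and summarize are illustrative rather than part of any Crossref client, and the field accesses assume the optional fields present in this particular record (author, title, container-title, issued, volume, issue, page).

# Minimal sketch: fetch a Crossref work record and print a citation line.
# Assumes the public endpoint https://api.crossref.org/works/{doi}; for
# "polite pool" usage, Crossref recommends including a mailto contact in
# the User-Agent header (omitted here for brevity).
import json
import urllib.request

DOI = "10.1007/s11042-021-10578-9"  # DOI taken from the record above

def fetch_work(doi: str) -> dict:
    """Fetch one work record and unwrap the Crossref envelope."""
    url = f"https://api.crossref.org/works/{doi}"
    with urllib.request.urlopen(url) as resp:
        envelope = json.load(resp)  # {"status": "ok", ..., "message": {...}}
    return envelope["message"]

def summarize(work: dict) -> str:
    """Reduce the key bibliographic fields to a single citation line."""
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = work["title"][0] if work.get("title") else "(untitled)"
    journal = work["container-title"][0] if work.get("container-title") else ""
    year = work["issued"]["date-parts"][0][0]  # first date part is the year
    return (f"{authors} ({year}) {title}. {journal} "
            f"{work.get('volume', '')}({work.get('issue', '')}):"
            f"{work.get('page', '')}. https://doi.org/{work['DOI']}")

if __name__ == "__main__":
    # For the record above this prints roughly:
    # Chiranjib Sur (2021) MRRC: multiple role representation crossover ...
    print(summarize(fetch_work(DOI)))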