{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T13:44:36Z","timestamp":1769694276727,"version":"3.49.0"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2019,2,18]],"date-time":"2019-02-18T00:00:00Z","timestamp":1550448000000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2019,2,18]],"date-time":"2019-02-18T00:00:00Z","timestamp":1550448000000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61772359"],"award-info":[{"award-number":["61772359"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61472275"],"award-info":[{"award-number":["61472275"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61572356"],"award-info":[{"award-number":["61572356"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010905","name":"Major Research Plan","doi-asserted-by":"publisher","award":["61872267"],"award-info":[{"award-number":["61872267"]}],"id":[{"id":"10.13039\/501100010905","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Process Lett"],"published-print":{"date-parts":[[2020,10]]},"DOI":"10.1007\/s11063-019-09997-5","type":"journal-article","created":{"date-parts":[[2019,2,18]],"date-time":"2019-02-18T16:46:24Z","timestamp":1550508384000},"page":"1057-1067","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":17,"title":["Hierarchical Deep Neural Network for Image Captioning"],"prefix":"10.1007","volume":"52","author":[{"given":"Yuting","family":"Su","sequence":"first","affiliation":[]},{"given":"Yuqian","family":"Li","sequence":"additional","affiliation":[]},{"given":"Ning","family":"Xu","sequence":"additional","affiliation":[]},{"given":"An-An","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,2,18]]},"reference":[{"key":"9997_CR1","unstructured":"Anderson P, He X, Buehler C, Teney D, Johnson M, Gould S, Zhang L (2017) Bottom-up and top-down attention for image captioning and VQA. arXiv preprint arXiv:1707.07998"},{"key":"9997_CR2","unstructured":"Banerjee S, Lavie A (2005) Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: IEEvaluation@ACL, pp 65\u201372"},{"key":"9997_CR3","doi-asserted-by":"crossref","unstructured":"Chen J, Zhang H, He X, Nie L, Liu W, Chua TS (2017) Attentive collaborative filtering: multimedia recommendation with item-and component-level attention. In: ACM SIGIR, ACM, pp 335\u2013344","DOI":"10.1145\/3077136.3080797"},{"key":"9997_CR4","doi-asserted-by":"crossref","unstructured":"Chen L, Zhang H, Xiao J, Nie L, Shao J, Liu W, Chua TS (2017) Sca-cnn: Spatial and channel-wise attention in convolutional networks for image captioning. 
In: CVPR, IEEE, pp 6298\u20136306","DOI":"10.1109\/CVPR.2017.667"},{"key":"9997_CR5","doi-asserted-by":"crossref","unstructured":"Chen X, Ma L, Jiang W, Yao J, Liu W (2018) Regularizing RNNs for caption generation by reconstructing the past with the present. arXiv preprint arXiv:1803.11439","DOI":"10.1109\/CVPR.2018.00834"},{"key":"9997_CR6","doi-asserted-by":"crossref","unstructured":"Farhadi A, Hejrati M, Sadeghi MA, Young P, Rashtchian C, Hockenmaier J, Forsyth D (2010) Every picture tells a story: generating sentences from images. In: ECCV, Springer, Berlin, pp 15\u201329","DOI":"10.1007\/978-3-642-15561-1_2"},{"key":"9997_CR7","doi-asserted-by":"publisher","unstructured":"Xu N, Liu AA, Wong YK, Zhang YD, Nie WZ, Su YT, Kankanhalli M (2019) Dual-stream recurrent neural network for video captioning. IEEE Trans Circuits Syst Video Technol. https:\/\/doi.org\/10.1109\/TCSVT.2018.2867286","DOI":"10.1109\/TCSVT.2018.2867286"},{"key":"9997_CR8","doi-asserted-by":"crossref","unstructured":"Liu AA, Xu N, Zhang HW, Nie WZ, Su YT, Zhang YD (2018) Multi-level policy and reward reinforcement learning for image captioning. In: IJCAI, pp 821\u2013827","DOI":"10.24963\/ijcai.2018\/114"},{"key":"9997_CR9","doi-asserted-by":"crossref","unstructured":"Girshick R (2015) Fast r-cnn. In: ICCV, pp 1440\u20131448","DOI":"10.1109\/ICCV.2015.169"},{"key":"9997_CR10","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: CVPR, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"9997_CR11","doi-asserted-by":"crossref","unstructured":"He X, Chua TS (2017) Neural factorization machines for sparse predictive analytics. In: ACM SIGIR, ACM, pp 355\u2013364","DOI":"10.1145\/3077136.3080777"},{"issue":"6","key":"9997_CR12","doi-asserted-by":"publisher","first-page":"109","DOI":"10.1109\/MSP.2017.2741510","volume":"34","author":"X He","year":"2017","unstructured":"He X, Deng L (2017) Deep learning for image-to-text generation: a technical overview. IEEE Signal Proc Mag 34(6):109\u2013116","journal-title":"IEEE Signal Proc Mag"},{"key":"9997_CR13","doi-asserted-by":"crossref","unstructured":"He X, He Z, Du X, Chua TS (2018) Adversarial personalized ranking for recommendation. In: ACM SIGIR, ACM, Cambridge, pp 355\u2013364","DOI":"10.1145\/3209978.3209981"},{"issue":"12","key":"9997_CR14","doi-asserted-by":"publisher","first-page":"2354","DOI":"10.1109\/TKDE.2018.2831682","volume":"30","author":"X He","year":"2018","unstructured":"He X, He Z, Song J, Liu Z, Jiang YG, Chua TS (2018) NAIS: neural attentive item similarity model for recommendation. IEEE Trans Knowl Data Eng 30(12):2354\u20132366","journal-title":"IEEE Trans Knowl Data Eng"},{"key":"9997_CR15","unstructured":"Jaderberg M, Simonyan K, Zisserman A, et al (2015) Spatial transformer networks. In: Advances in neural information processing systems, pp 2017\u20132025"},{"key":"9997_CR16","doi-asserted-by":"crossref","unstructured":"Jia X, Gavves E, Fernando B, Tuytelaars T (2015) Guiding long-short term memory for image caption generation. arXiv preprint arXiv:1509.04942","DOI":"10.1109\/ICCV.2015.277"},{"key":"9997_CR17","doi-asserted-by":"crossref","unstructured":"Johnson J, Karpathy A, Fei-Fei L (2016) Densecap: fully convolutional localization networks for dense captioning. 
In: CVPR, pp 4565\u20134574","DOI":"10.1109\/CVPR.2016.494"},{"key":"9997_CR18","doi-asserted-by":"crossref","unstructured":"Karpathy A, Fei-Fei L (2015) Deep visual-semantic alignments for generating image descriptions. In: CVPR, pp 3128\u20133137","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"9997_CR19","unstructured":"Kiros R, Salakhutdinov R, Zemel RS (2014) Unifying visual-semantic embeddings with multimodal neural language models. In: NIPS"},{"issue":"1","key":"9997_CR20","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna R, Zhu Y, Groth O, Johnson J, Hata K, Kravitz J, Chen S, Kalantidis Y, Li LJ, Shamma DA et al (2017) Visual genome: connecting language and vision using crowdsourced dense image annotations. IJCV 123(1):32\u201373","journal-title":"IJCV"},{"issue":"12","key":"9997_CR21","doi-asserted-by":"publisher","first-page":"2891","DOI":"10.1109\/TPAMI.2012.162","volume":"35","author":"G Kulkarni","year":"2013","unstructured":"Kulkarni G, Premraj V, Ordonez V, Dhar S, Li S, Choi Y, Berg AC, Berg TL (2013) Babytalk: understanding and generating simple image descriptions. IEEE Trans Pattern Anal Mach Intell 35(12):2891\u20132903","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"9997_CR22","doi-asserted-by":"crossref","unstructured":"Li C, Chen J, Wan W, Li T (2017) Combining object-based attention and attributes for image captioning. In: ICIG, Springer, Berlin, pp 614\u2013625","DOI":"10.1007\/978-3-319-71607-7_54"},{"key":"9997_CR23","unstructured":"Li S, Kulkarni G, Berg TL, Berg AC, Choi Y (2011) Composing simple image descriptions using web-scale n-grams. In: CNLL, association for computational linguistics, pp 220\u2013228"},{"key":"9997_CR24","unstructured":"Lin C (2005) Recall-oriented understudy for gisting evaluation (rouge). Retrieved 20 Aug 2005"},{"key":"9997_CR25","doi-asserted-by":"crossref","unstructured":"Lin TY, Maire M, Belongie S, Hays J, Perona P, Ramanan D, Doll\u00e1r P, Zitnick CL (2014) Microsoft coco: common objects in context. In: ECCV, Springer, Berlin, pp 740\u2013755","DOI":"10.1007\/978-3-319-10602-1_48"},{"issue":"3","key":"9997_CR26","doi-asserted-by":"crossref","first-page":"916","DOI":"10.1109\/TCYB.2017.2664503","volume":"48","author":"AA Liu","year":"2018","unstructured":"Liu AA, Nie WZ, Gao Y, Su YT (2018) View-based 3-D model retrieval: a benchmark. IEEE Trans Cybern 48(3):916\u2013928","journal-title":"IEEE Trans Cybern"},{"key":"9997_CR27","doi-asserted-by":"crossref","unstructured":"Liu C, Sun F, Wang C, Wang F, Yuille A (2017) MAT: A multimodal attentive translator for image captioning. arXiv preprint arXiv:1702.05658","DOI":"10.24963\/ijcai.2017\/563"},{"key":"9997_CR28","doi-asserted-by":"crossref","unstructured":"Liu F, Xiang T, Hospedales TM, Yang W, Sun C (2017) Semantic regularisation for recurrent image annotation. In: CVPR, IEEE, pp 4160\u20134168","DOI":"10.1109\/CVPR.2017.443"},{"key":"9997_CR29","doi-asserted-by":"crossref","unstructured":"Lu J, Xiong C, Parikh D, Socher R (2017) Knowing when to look: adaptive attention via a visual sentinel for image captioning. In: CVPR, pp 3242\u20133250","DOI":"10.1109\/CVPR.2017.345"},{"issue":"4","key":"9997_CR30","doi-asserted-by":"publisher","first-page":"458","DOI":"10.1109\/TBDATA.2017.2723395","volume":"3","author":"W Nie","year":"2017","unstructured":"Nie W, Cheng H, Su Y (2017) Modeling temporal information of mitotic for mitotic event detection. 
IEEE Trans Big Data 3(4):458\u2013469","journal-title":"IEEE Trans Big Data"},{"key":"9997_CR31","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2852310","author":"WZ Nie","year":"2018","unstructured":"Nie WZ, Liu AA, Gao Y, Su YT (2018) Hyper-clique graph matching and applications. IEEE Trans Circuits Syst Video Technol. https:\/\/doi.org\/10.1109\/TCSVT.2018.2852310","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"9997_CR32","unstructured":"Papineni K, Roukos S, Ward T, Zhu WJ (2002) BLEU: a method for automatic evaluation of machine translation. In: ACL, association for computational linguistics, pp 311\u2013318"},{"issue":"6","key":"9997_CR33","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2017","unstructured":"Ren S, He K, Girshick R, Sun J (2017) Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Trans Pattern Anal Mach Intell 39(6):1137\u20131149","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"9997_CR34","doi-asserted-by":"crossref","unstructured":"Rennie SJ, Marcheret E, Mroueh Y, Ross J, Goel V (2017) Self-critical sequence training for image captioning. In: CVPR, pp 1179\u20131195","DOI":"10.1109\/CVPR.2017.131"},{"key":"9997_CR35","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556"},{"key":"9997_CR36","doi-asserted-by":"crossref","unstructured":"Vedantam R, Lawrence Zitnick C, Parikh D (2015) Cider: consensus-based image description evaluation. In: CVPR, pp 4566\u20134575","DOI":"10.1109\/CVPR.2015.7299087"},{"issue":"4","key":"9997_CR37","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2016.2587640","volume":"39","author":"O Vinyals","year":"2017","unstructured":"Vinyals O, Toshev A, Bengio S, Erhan D (2017) Show and tell: lessons learned from the 2015 mscoco image captioning challenge. IEEE Trans Pattern Anal Mach Intell 39(4):652\u2013663","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"9997_CR38","doi-asserted-by":"crossref","unstructured":"Wu Q, Shen C, Liu L, Dick A, van den Hengel A (2016) What value do explicit high level concepts have in vision to language problems? In: CVPR, pp 203\u2013212","DOI":"10.1109\/CVPR.2016.29"},{"key":"9997_CR39","unstructured":"Xu K, Ba J, Kiros R, Cho K, Courville A, Salakhudinov R, Zemel R, Bengio Y (2015) Show, attend and tell: neural image caption generation with visual attention. In: ICML, pp 2048\u20132057"},{"key":"9997_CR40","doi-asserted-by":"crossref","unstructured":"Yang L, Tang KD, Yang J, Li LJ (2017) Dense captioning with joint inference and visual context. In: CVPR, pp 1978\u20131987","DOI":"10.1109\/CVPR.2017.214"},{"key":"9997_CR41","unstructured":"Yang Z, Yuan Y, Wu Y, Salakhutdinov R, Cohen WW (2016) Encode, review, and decode: reviewer module for caption generation. arXiv preprint arXiv:1605.07912"},{"key":"9997_CR42","doi-asserted-by":"crossref","unstructured":"Yao T, Pan Y, Li Y, Qiu Z, Mei T (2017) Boosting image captioning with attributes. In: ICCV, pp 22\u201329","DOI":"10.1109\/ICCV.2017.524"},{"key":"9997_CR43","doi-asserted-by":"crossref","unstructured":"You Q, Jin H, Wang Z, Fang C, Luo J (2016) Image captioning with semantic attention. 
In: CVPR, pp 4651\u20134659","DOI":"10.1109\/CVPR.2016.503"},{"key":"9997_CR44","doi-asserted-by":"crossref","unstructured":"Zhang H, Kyaw Z, Chang SF, Chua TS (2017) Visual translation embedding network for visual relation detection. In: CVPR, pp 3107\u20133115","DOI":"10.1109\/CVPR.2017.331"},{"key":"9997_CR45","doi-asserted-by":"crossref","unstructured":"Zhang H, Kyaw Z, Yu J, Chang SF (2017) Ppr-fcn: Weakly supervised visual relation detection via parallel pairwise r-fcn. arXiv preprint arXiv:1708.01956","DOI":"10.1109\/ICCV.2017.454"},{"key":"9997_CR46","doi-asserted-by":"crossref","unstructured":"Zhang H, Niu Y, Chang SF (2018) Grounding referring expressions in images by variational context. In: CVPR, pp 4158\u20134166","DOI":"10.1109\/CVPR.2018.00437"}],"container-title":["Neural Processing Letters"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s11063-019-09997-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-019-09997-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-019-09997-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,12]],"date-time":"2022-09-12T01:41:55Z","timestamp":1662946915000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s11063-019-09997-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,2,18]]},"references-count":46,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2020,10]]}},"alternative-id":["9997"],"URL":"https:\/\/doi.org\/10.1007\/s11063-019-09997-5","relation":{},"ISSN":["1370-4621","1573-773X"],"issn-type":[{"value":"1370-4621","type":"print"},{"value":"1573-773X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,2,18]]},"assertion":[{"value":"18 February 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}
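The record above is a single Crossref work object: a top-level "status" field plus a "message" payload holding the bibliographic data (title, author, reference, is-referenced-by-count, and so on). Below is a minimal sketch of how such a record can be fetched and summarized, assuming the public Crossref REST API endpoint https://api.crossref.org/works/{DOI} and the third-party requests library; the variable names are illustrative, not part of the record.

```python
# Sketch: fetch and summarize a Crossref work record such as the one above.
import requests

DOI = "10.1007/s11063-019-09997-5"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # bibliographic payload sits under "message"

title = work["title"][0]             # "title" is a list of strings
journal = work["container-title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work["author"]
)
cited_by = work["is-referenced-by-count"]  # times this work is cited
n_refs = work["references-count"]          # entries in its reference list

print(f"{title} ({journal})")
print(f"Authors: {authors}")
print(f"Cited by {cited_by} works; cites {n_refs} references")
```

Against the DOI in this record, the sketch would print the article title, journal, the four authors, and the citation counts shown in "is-referenced-by-count" and "references-count". Note that most fields are optional in Crossref deposits, so code handling arbitrary works should guard lookups with .get() and defaults.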