{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:33:34Z","timestamp":1730266414043,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,7]]},"DOI":"10.1109\/ijcnn.2019.8851897","type":"proceedings-article","created":{"date-parts":[[2019,9,30]],"date-time":"2019-09-30T23:44:32Z","timestamp":1569887072000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Context Gating with Short Temporal Information for Video Captioning"],"prefix":"10.1109","author":[{"given":"Jinlei","family":"Xu","sequence":"first","affiliation":[]},{"given":"Ting","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Chunping","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Ji","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Recurrent memory addressing for describing videos","volume":"1611","author":"agrawal","year":"2016"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.496"},{"article-title":"Very deep convolutional networks for large-scale image recognition","year":"2014","author":"simonyan","key":"ref33"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref31","article-title":"Imagenet classification with deep convolutional neural networks","author":"romero","year":"2015","journal-title":"International Conference on Learning Representations"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"kingma","key":"ref30"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.117"},{"article-title":"Delving deeper into convolutional networks for learning video representations","year":"2015","author":"ballas","key":"ref36"},{"key":"ref35","article-title":"Integrating language and vision to generate natural language descriptions of videos in the wild","author":"thomason","year":"2014","journal-title":"University of Texas at Austin Austin United States Tech Rep"},{"key":"ref34","first-page":"12","article-title":"Inception-v4, inception-resnet and the impact of residual connections on learning","volume":"4","author":"szegedy","year":"2017","journal-title":"AAAI"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.111"},{"key":"ref40","first-page":"4197","article-title":"Video captioning with listwise supervision","author":"liu","year":"2017","journal-title":"AAAI"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00795"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00784"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1023\/A:1020346032608"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.337"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.61"},{"key":"ref17","first-page":"6","article-title":"Jointly modeling deep video and compositional text to bridge vision and language in a unified framework","volume":"5","author":"xu","year":"2015","journal-title":"AAAI"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref28","first-page":"65","article-title":"Meteor: An automatic metric for mt evaluation with improved correlation with human judgments","author":"banerjee","year":"2005","journal-title":"Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00608"},{"key":"ref27","article-title":"Rouge: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"Workshop on Text Summarization Branches Out"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00896"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.497"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"article-title":"Translating videos to natural language using deep recurrent neural networks","year":"2014","author":"venugopalan","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.515"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.512"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/2818048.2820013"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2016.7900081"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2017.03.021"},{"article-title":"Can spatiotemporal 3d cnns retrace the history of 2d cnns and imagenet?","year":"2017","author":"hara","key":"ref20"},{"article-title":"Learning phrase representations using rnn encoder-decoder for statistical machine translation","year":"2014","author":"cho","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"article-title":"Microsoft coco captions: Data collection and evaluation server","year":"2015","author":"chen","key":"ref42"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"article-title":"Memory-augmented attention modelling for videos","year":"2016","author":"fakoor","key":"ref41"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00751"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1074\/jbc.273.14.7906"},{"key":"ref25","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","author":"chen","year":"2011","journal-title":"Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics Human Language Technologies-Volume 1 Association for Computational Linguistics"}],"event":{"name":"2019 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2019,7,14]]},"location":"Budapest, Hungary","end":{"date-parts":[[2019,7,19]]}},"container-title":["2019 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8840768\/8851681\/08851897.pdf?arnumber=8851897","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,17]],"date-time":"2022-07-17T17:48:14Z","timestamp":1658080094000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8851897\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,7]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/ijcnn.2019.8851897","relation":{},"subject":[],"published":{"date-parts":[[2019,7]]}}}