{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T06:14:44Z","timestamp":1771049684747,"version":"3.50.1"},"reference-count":21,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"DOI":"10.13039\/501100003725","name":"Basic Science Research Program through the National Research Foundation of Korea (NRF)","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100010002","name":"Ministry of Education","doi-asserted-by":"publisher","award":["NRF-2016R1D1A1A09919551"],"award-info":[{"award-number":["NRF-2016R1D1A1A09919551"]}],"id":[{"id":"10.13039\/100010002","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2018]]},"DOI":"10.1109\/access.2018.2814075","type":"journal-article","created":{"date-parts":[[2018,3,19]],"date-time":"2018-03-19T21:47:08Z","timestamp":1521496028000},"page":"16639-16645","source":"Crossref","is-referenced-by-count":15,"title":["Natural Language Description of Video Streams Using Task-Specific Feature Encoding"],"prefix":"10.1109","volume":"6","author":[{"given":"Aniqa","family":"Dilawari","sequence":"first","affiliation":[]},{"given":"Muhammad Usman Ghani","family":"Khan","sequence":"additional","affiliation":[]},{"given":"Ammarah","family":"Farooq","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9968-0330","authenticated-orcid":false,"given":"Zahoor-Ur","family":"Rehman","sequence":"additional","affiliation":[]},{"given":"Seungmin","family":"Rho","sequence":"additional","affiliation":[]},{"given":"Irfan","family":"Mehmood","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.111"},{"key":"ref11","author":"ballas","year":"2015","journal-title":"Delving deeper into convolutional networks for learning video representations"},{"key":"ref12","first-page":"38","article-title":"Natural language descriptions of visual scenes: Corpus generation and analysis","author":"khan","year":"2012","journal-title":"Proc Joint Workshop Exploiting Synergies Between Inf Retr Mach Transl (ESIRMT) Hybrid Approaches Mach Transl (HyTra)"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-3348"},{"key":"ref14","first-page":"1","article-title":"Rouge: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"Proc ACL Workshop Text Summarization Branches Out"},{"key":"ref15","first-page":"27","article-title":"Describing video contents in natural language","author":"khan","year":"2012","journal-title":"Proc Workshop Innov Hybrid Approaches Process Textual Data"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref17","year":"2018","journal-title":"Long short term memory"},{"key":"ref18","author":"simonyan","year":"2014","journal-title":"Very Deep Convolutional Networks for Large-scale Image Recognition"},{"key":"ref19","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.277"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.496"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref8","first-page":"104","article-title":"Spatio-temporal attention models for grounded video captioning","author":"zanfir","year":"2016","journal-title":"Proc Asian Conf Comput Vis"},{"key":"ref7","author":"long","year":"2016","journal-title":"Video captioning with multi-faceted attention"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298754"},{"key":"ref1","first-page":"2048","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/381"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref21","year":"2017","journal-title":"Pytorch Tensors and Dynamic neural networks in Python with strong GPU acceleration"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8274985\/08319487.pdf?arnumber=8319487","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T16:15:57Z","timestamp":1642004157000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8319487\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/access.2018.2814075","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018]]}}}