{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T20:56:23Z","timestamp":1757451383399,"version":"3.37.3"},"reference-count":38,"publisher":"Springer Science and Business Media LLC","issue":"13","license":[{"start":{"date-parts":[[2022,3,8]],"date-time":"2022-03-08T00:00:00Z","timestamp":1646697600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,3,8]],"date-time":"2022-03-08T00:00:00Z","timestamp":1646697600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100019550","name":"Scheme for Promotion of Academic and Research Collaboration","doi-asserted-by":"crossref","award":["P995 of No: SPARC\/2018-2019\/119\/SL (IN)"],"award-info":[{"award-number":["P995 of No: SPARC\/2018-2019\/119\/SL (IN)"]}],"id":[{"id":"10.13039\/501100019550","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2022,5]]},"DOI":"10.1007\/s11042-022-12343-y","type":"journal-article","created":{"date-parts":[[2022,3,8]],"date-time":"2022-03-08T00:02:20Z","timestamp":1646697740000},"page":"17989-18009","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":12,"title":["V2T: video to text framework using a novel automatic shot boundary detection algorithm"],"prefix":"10.1007","volume":"81","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2683-0542","authenticated-orcid":false,"given":"Alok","family":"Singh","sequence":"first","affiliation":[]},{"given":"Thoudam 
Doren","family":"Singh","sequence":"additional","affiliation":[]},{"given":"Sivaji","family":"Bandyopadhyay","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,3,8]]},"reference":[{"issue":"6","key":"12343_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3355390","volume":"52","author":"N Aafaq","year":"2019","unstructured":"Aafaq N, Mian A, Liu W, Gilani SZ, Shah M (2019) Video description: a survey of methods, datasets, and evaluation metrics. ACM Comput Surv (CSUR) 52(6):1\u201337","journal-title":"ACM Comput Surv (CSUR)"},{"key":"12343_CR2","unstructured":"Baldi P (2012) Autoencoders, unsupervised learning, and deep architectures. In: Proceedings of ICML workshop on unsupervised and transfer learning, pp 37\u201349"},{"key":"12343_CR3","doi-asserted-by":"crossref","unstructured":"Baraldi L, Grana C, Cucchiara R (2017) Hierarchical boundary-aware neural encoder for video captioning. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1657\u20131666","DOI":"10.1109\/CVPR.2017.339"},{"issue":"7","key":"12343_CR4","doi-asserted-by":"publisher","first-page":"2631","DOI":"10.1109\/TCYB.2018.2831447","volume":"49","author":"Y Bin","year":"2018","unstructured":"Bin Y, Yang Y, Shen F, Xie N, Shen HT, Li X (2018) Describing video with attention-based bidirectional lstm. IEEE Trans on Cybern 49 (7):2631\u20132641","journal-title":"IEEE Trans on Cybern"},{"key":"12343_CR5","doi-asserted-by":"crossref","unstructured":"Chakraborty S, Singh A, Thounaojam DM (2021) A novel bifold-stage shot boundary detection algorithm: invariant to motion and illumination. Vis Comput, 1\u201312","DOI":"10.1007\/s00371-020-02027-9"},{"key":"12343_CR6","doi-asserted-by":"crossref","unstructured":"Chakraborty S, Thounaojam DM (2019) A novel shot boundary detection system using hybrid optimization technique. 
Appl Intell, 1\u201314","DOI":"10.1007\/s10489-019-01444-1"},{"issue":"3","key":"12343_CR7","doi-asserted-by":"publisher","first-page":"4007","DOI":"10.1007\/s11042-020-09857-8","volume":"80","author":"S Chakraborty","year":"2021","unstructured":"Chakraborty S, Thounaojam DM, Sinha N (2021) A shot boundary detection technique based on visual colour information. Multimed Tools Applic 80 (3):4007\u20134022","journal-title":"Multimed Tools Applic"},{"key":"12343_CR8","doi-asserted-by":"crossref","unstructured":"Chen Y, Wang S, Zhang W, Huang Q (2018) Less is more: picking informative frames for video captioning. In: Proceedings of the European conference on computer vision (ECCV), pp 358\u2013373","DOI":"10.1007\/978-3-030-01261-8_22"},{"key":"12343_CR9","doi-asserted-by":"crossref","unstructured":"Cherian A, Wang J, Hori C, Marks T (2020) Spatio-temporal ranked-attention networks for video captioning. In: The IEEE Winter conference on applications of computer vision, pp 1617\u20131626","DOI":"10.1109\/WACV45572.2020.9093291"},{"key":"12343_CR10","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1016\/j.patrec.2018.09.022","volume":"116","author":"E Daskalakis","year":"2018","unstructured":"Daskalakis E, Tzelepi M, Tefas A (2018) Learning deep spatiotemporal features for video captioning. Pattern Recogn Lett 116:143\u2013149","journal-title":"Pattern Recogn Lett"},{"key":"12343_CR11","doi-asserted-by":"publisher","first-page":"583","DOI":"10.1016\/j.future.2018.10.054","volume":"93","author":"S Ding","year":"2019","unstructured":"Ding S, Qu S, Xi Y, Wan S (2019) A long video caption generation algorithm for big video data retrieval. Futur Gener Comput Syst 93:583\u2013595","journal-title":"Futur Gener Comput Syst"},{"key":"12343_CR12","doi-asserted-by":"crossref","unstructured":"Gao L, Wang X, Song J, Liu Y (2019) Fused gru with semantic-temporal attention for video captioning. 
Neurocomputing","DOI":"10.1016\/j.neucom.2018.06.096"},{"key":"12343_CR13","unstructured":"Hakeem A, Sheikh Y, Shah M (2004) CaseE: a hierarchical event representation for the analysis of videos. In: AAAI, pp 263\u2013268"},{"key":"12343_CR14","unstructured":"Hassanien A, Elgharib M, Selim A, Bae SH, Hefeeda M, Matusik W (2017) Large-scale, fast and accurate shot boundary detection through spatio-temporal convolutional neural networks. arXiv:1705.03281"},{"issue":"7","key":"12343_CR15","doi-asserted-by":"publisher","first-page":"1237","DOI":"10.1007\/s11760-017-1080-0","volume":"11","author":"T Kar","year":"2017","unstructured":"Kar T, Kanungo P (2017) A motion and illumination resilient framework for automatic shot boundary detection. SIViP 11(7):1237\u20131244","journal-title":"SIViP"},{"key":"12343_CR16","doi-asserted-by":"crossref","unstructured":"Karpathy A, Toderici G, Shetty S, Leung T, Sukthankar R, Fei-Fei L (2014) Large-scale video classification with convolutional neural networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1725\u20131732","DOI":"10.1109\/CVPR.2014.223"},{"issue":"2","key":"12343_CR17","doi-asserted-by":"publisher","first-page":"171","DOI":"10.1023\/A:1020346032608","volume":"50","author":"A Kojima","year":"2002","unstructured":"Kojima A, Tamura T, Fukunaga K (2002) Natural language description of human activities from video images based on concept hierarchy of actions. Int J Comput Vis 50(2):171\u2013184","journal-title":"Int J Comput Vis"},{"key":"12343_CR18","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1016\/j.patrec.2017.10.012","volume":"105","author":"W Li","year":"2018","unstructured":"Li W, Guo D, Fang X (2018) Multimodal architecture for video captioning with memory networks and an attention mechanism. 
Pattern Recogn Lett 105:23\u201329","journal-title":"Pattern Recogn Lett"},{"key":"12343_CR19","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1016\/j.cviu.2017.04.013","volume":"163","author":"AA Liu","year":"2017","unstructured":"Liu AA, Xu N, Wong Y, Li J, Su YT, Kankanhalli M (2017) Hierarchical & multimodal video captioning: Discovering and transferring multimodal knowledge for vision to language. Comput Vis Image Underst 163:113\u2013125","journal-title":"Comput Vis Image Underst"},{"key":"12343_CR20","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1162\/tacl_a_00013","volume":"6","author":"X Long","year":"2018","unstructured":"Long X, Gan C, de Melo G (2018) Video captioning with multi-faceted attention. Trans Assoc Comput Linguis 6:173\u2013184","journal-title":"Trans Assoc Comput Linguis"},{"issue":"6","key":"12343_CR21","doi-asserted-by":"publisher","first-page":"102302","DOI":"10.1016\/j.ipm.2020.102302","volume":"57","author":"M Nabati","year":"2020","unstructured":"Nabati M, Behrad A (2020) Multi-sentence video captioning using content-oriented beam searching and multi-stage refining algorithm. Inform Process Manage 57(6):102302","journal-title":"Inform Process Manage"},{"key":"12343_CR22","doi-asserted-by":"publisher","first-page":"102840","DOI":"10.1016\/j.cviu.2019.102840","volume":"190","author":"M Nabati","year":"2020","unstructured":"Nabati M, Behrad A (2020) Video captioning using boosted and parallel long short-term memory networks. Comput Vis Image Underst 190:102840","journal-title":"Comput Vis Image Underst"},{"key":"12343_CR23","doi-asserted-by":"publisher","first-page":"126","DOI":"10.1016\/j.cviu.2017.06.012","volume":"163","author":"F Nian","year":"2017","unstructured":"Nian F, Li T, Wang Y, Wu X, Ni B, Xu C (2017) Learning explicit video attributes from mid-level representation for video captioning. 
Comput Vis Image Underst 163:126\u2013138","journal-title":"Comput Vis Image Underst"},{"issue":"10","key":"12343_CR24","doi-asserted-by":"publisher","first-page":"14007","DOI":"10.1007\/s11042-018-7040-z","volume":"78","author":"S Pini","year":"2019","unstructured":"Pini S, Cornia M, Bolelli F, Baraldi L, Cucchiara R (2019) M-vad names: a dataset for video captioning with naming. Multimed Tools Applic 78 (10):14007\u201314027","journal-title":"Multimed Tools Applic"},{"issue":"10","key":"12343_CR25","doi-asserted-by":"publisher","first-page":"4049","DOI":"10.1109\/TIP.2013.2268976","volume":"22","author":"J Ren","year":"2013","unstructured":"Ren J, Jiang X, Yuan J (2013) Noise-resistant local binary pattern with an embedded error-correction mechanism. IEEE Trans Image Process 22 (10):4049\u20134060. https:\/\/doi.org\/10.1109\/TIP.2013.2268976","journal-title":"IEEE Trans Image Process"},{"key":"12343_CR26","first-page":"318","volume-title":"Learning internal representations by error propagation","author":"DE Rumelhart","year":"1986","unstructured":"Rumelhart DE, Hinton GE, Williams RJ (1986) Learning internal representations by error propagation. MIT Press, Cambridge, pp 318\u2013362"},{"key":"12343_CR27","doi-asserted-by":"crossref","unstructured":"Shetty R, Laaksonen J (2016) Frame-and segment-level features and candidate pool evaluation for video caption generation. In: Proceedings of the 24th ACM international conference on multimedia, pp 1073\u20131076","DOI":"10.1145\/2964284.2984062"},{"key":"12343_CR28","doi-asserted-by":"crossref","unstructured":"Shin A, Ohnishi K, Harada T (2016) Beyond caption to narrative: video captioning with multiple sentences. In: 2016 IEEE International conference on image processing (ICIP), pp 3364\u20133368. IEEE","DOI":"10.1109\/ICIP.2016.7532983"},{"key":"12343_CR29","unstructured":"Singh A, Singh TD, Bandyopadhyay S (2020) A comprehensive review on recent methods and challenges of video description. 
arXiv:2011.14752"},{"key":"12343_CR30","unstructured":"Singh A, Singh TD, Bandyopadhyay S (2020) Nits-vc system for vatex video captioning challenge 2020. arXiv:2006.04058"},{"key":"12343_CR31","doi-asserted-by":"publisher","unstructured":"Singh A, Thounaojam DM, Chakraborty S (2019) A novel automatic shot boundary detection algorithm: robust to illumination and motion effect. SIViP, 1\u20139. https:\/\/doi.org\/10.1007\/s11760-019-01593-3","DOI":"10.1007\/s11760-019-01593-3"},{"key":"12343_CR32","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1016\/j.image.2017.01.010","volume":"53","author":"AK Tiwari","year":"2017","unstructured":"Tiwari AK, Kanhangad V, Pachori RB (2017) Histogram refinement for texture descriptor based image retrieval. Signal Process Image Commun 53:73\u201385","journal-title":"Signal Process Image Commun"},{"key":"12343_CR33","doi-asserted-by":"crossref","unstructured":"Venugopalan S, Hendricks LA, Mooney R, Saenko K (2016) Improving lstm-based video description with linguistic knowledge mined from text. arXiv:1604.01729","DOI":"10.18653\/v1\/D16-1204"},{"key":"12343_CR34","doi-asserted-by":"crossref","unstructured":"Venugopalan S, Xu H, Donahue J, Rohrbach M, Mooney R, Saenko K (2014) Translating videos to natural language using deep recurrent neural networks. arXiv:1412.4729","DOI":"10.3115\/v1\/N15-1173"},{"key":"12343_CR35","doi-asserted-by":"publisher","first-page":"327","DOI":"10.1016\/j.patrec.2018.07.024","volume":"130","author":"H Wang","year":"2020","unstructured":"Wang H, Gao C, Han Y (2020) Sequence in sequence for video captioning. Pattern Recogn Lett 130:327\u2013334","journal-title":"Pattern Recogn Lett"},{"key":"12343_CR36","doi-asserted-by":"crossref","unstructured":"Xiao H, Shi J (2020) Video captioning with text-based dynamic attention and step-by-step learning. 
Pattern Recognition Letters","DOI":"10.1016\/j.patrec.2020.03.001"},{"key":"12343_CR37","unstructured":"Xu K, Ba J, Kiros R, Cho K, Courville A, Salakhudinov R, Zemel R, Bengio Y (2015) Show, attend and tell: neural image caption generation with visual attention. In: International conference on machine learning, pp 2048\u20132057"},{"key":"12343_CR38","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1016\/j.neucom.2019.05.027","volume":"357","author":"Y Xu","year":"2019","unstructured":"Xu Y, Yang J, Mao K (2019) Semantic-filtered soft-split-aware video captioning with audio-augmented feature. Neurocomputing 357:24\u201335","journal-title":"Neurocomputing"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-12343-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-022-12343-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-12343-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,6]],"date-time":"2022-05-06T10:30:00Z","timestamp":1651833000000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-022-12343-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,3,8]]},"references-count":38,"journal-issue":{"issue":"13","published-print":{"date-parts":[[2022,5]]}},"alternative-id":["12343"],"URL":"https:\/\/doi.org\/10.1007\/s11042-022-12343-y","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2022,3,8]]},"assertion":[{"value":"5 November 
2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 June 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 January 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 March 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}