{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T02:43:15Z","timestamp":1743129795947,"version":"3.40.3"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030007638"},{"type":"electronic","value":"9783030007645"}],"license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018]]},"DOI":"10.1007\/978-3-030-00764-5_8","type":"book-chapter","created":{"date-parts":[[2018,9,17]],"date-time":"2018-09-17T16:21:31Z","timestamp":1537201291000},"page":"78-88","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["iMakeup: Makeup Instructional Video Dataset for Fine-Grained Dense Video Captioning"],"prefix":"10.1007","author":[{"given":"Xiaozhu","family":"Lin","sequence":"first","affiliation":[]},{"given":"Qin","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Shizhe","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yuqing","family":"Song","sequence":"additional","affiliation":[]},{"given":"Yida","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,9,18]]},"reference":[{"key":"8_CR1","doi-asserted-by":"crossref","unstructured":"Venugopalan, S., et al.: Translating Videos to Natural Language Using Deep Recurrent Neural Networks. Computer Science (2014)","DOI":"10.3115\/v1\/N15-1173"},{"key":"8_CR2","doi-asserted-by":"crossref","unstructured":"Yao, L., Torabi, A., et al.: Describing videos by exploiting temporal structure. In: IEEE International Conference on Computer Vision, pp. 4507\u20134515 (2015)","DOI":"10.1109\/ICCV.2015.512"},{"key":"8_CR3","doi-asserted-by":"crossref","unstructured":"Krishna, R., et al.: Dense-captioning events in videos. In: IEEE International Conference on Computer Vision, p. 6 (2017)","DOI":"10.1109\/ICCV.2017.83"},{"issue":"3","key":"8_CR4","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"8_CR5","doi-asserted-by":"crossref","unstructured":"Das, P., et al.: A thousand frames in just a few words: lingual description of videos through latent topics and sparse object stitching. In: IEEE Conference on Computer Vision and Pattern Recognition (2013)","DOI":"10.1109\/CVPR.2013.340"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., et al.: A dataset for movie description. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"8_CR7","unstructured":"http:\/\/www.wikihow.com"},{"key":"8_CR8","doi-asserted-by":"crossref","unstructured":"Regneri, M., et al.: Transactions of the Association for Computational Linguistics (TACL), Grounding Action Descriptions in Videos, vol. 1, pp. 25\u201336 (2013)","DOI":"10.1162\/tacl_a_00207"},{"key":"8_CR9","doi-asserted-by":"crossref","unstructured":"Xu, J., et al.: MSR-VTT: a large video description dataset for bridging video and language. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.571"},{"key":"8_CR10","doi-asserted-by":"crossref","unstructured":"Zhou, L., et al.: End-to-End Dense Video Captioning with Masked Transformer. arXiv preprint arXiv:1804.00819 (2018)","DOI":"10.1109\/CVPR.2018.00911"},{"key":"8_CR11","doi-asserted-by":"crossref","unstructured":"Shou, Z., et al.: Temporal Action Localization in Untrimmed Videos via Multi-stage CNNs, pp. 1049\u20131058 (2016)","DOI":"10.1109\/CVPR.2016.119"},{"issue":"8","key":"8_CR12","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S.: Longshort-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"8_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"8_CR14","unstructured":"Soomro, K., et al.: UCF101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"8_CR15","unstructured":"Kay, W., et al.: The kinetics human action video dataset. arXiv preprint arXiv:1705.06950 (2017)"},{"key":"8_CR16","unstructured":"Abu-El-Haija, S., et al.: YouTube-8M: a large-scale video classification benchmark. arXiv preprint arXiv:1609.08675 (2016)"},{"key":"8_CR17","unstructured":"Chen, D.L., Dolan, W.B.: Collecting highly parallel data for paraphrase evaluation. In: ACL, pp. 190\u2013200 (2011)"},{"key":"8_CR18","doi-asserted-by":"crossref","unstructured":"Heilbron, F.C., Escorcia, V., Ghanem, B., Niebles, J.C.: ActivityNet: a large-scale video benchmark for human activity understanding. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 961\u2013970 (2015)","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"8_CR19","unstructured":"Monfort, M., et al.: Moments in time dataset: one million videos for event understanding (2018)"},{"key":"8_CR20","doi-asserted-by":"crossref","unstructured":"Zhou, L., Xu, C., Corso, J.J.: Towards automatic learning of procedures from web instructional videos. In: AAAI (2018)","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"8_CR21","doi-asserted-by":"crossref","first-page":"25","DOI":"10.1162\/tacl_a_00207","volume":"1","author":"M Regneri","year":"2013","unstructured":"Regneri, M.: Grounding action descriptions in videos. Trans. Assoc. Comput. Linguist. 1, 25\u201336 (2013)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"8_CR22","doi-asserted-by":"publisher","first-page":"10","DOI":"10.1007\/BF01210504","volume":"1","author":"HJ Zhang","year":"1993","unstructured":"Zhang, H.J.: Automatic partitioning of full-motion video. Multimed. Syst. 1, 10\u201328 (1993)","journal-title":"Multimed. Syst."},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"Lienhart, R., Pfeiffer, S., Effelsberg, W.: Video abstracting. Commun. ACM, 1\u201312 (1997)","DOI":"10.1145\/265563.265572"},{"key":"8_CR24","doi-asserted-by":"publisher","first-page":"168","DOI":"10.1109\/TCSVT.2006.888023","volume":"17","author":"J Yuan","year":"2007","unstructured":"Yuan, J.: A formal study of shot boundary detection. IEEE Trans. Circuits Syst. Video Tech. 17, 168\u2013186 (2007)","journal-title":"IEEE Trans. Circuits Syst. Video Tech."},{"key":"8_CR25","first-page":"189","volume":"95","author":"R Zabih","year":"1995","unstructured":"Zabih, R., Miller, J., Mai, K.: A feature-based algorithm for detecting and classifying scene breaks. ACM Multimed. 95, 189\u2013200 (1995)","journal-title":"ACM Multimed."},{"key":"8_CR26","unstructured":"Porter, S.V., et al.: Video cut detection using frequency domain correlation. In: 15th International Conference on Pattern Recognition, pp. 413\u2013416 (2000)"},{"key":"8_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"137","DOI":"10.1007\/978-3-319-46493-0_9","volume-title":"Computer Vision \u2013 ECCV 2016","author":"D-A Huang","year":"2016","unstructured":"Huang, D.-A., Fei-Fei, L., Niebles, J.C.: Connectionist temporal modeling for weakly supervised action labeling. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 137\u2013153. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_9"},{"key":"8_CR28","first-page":"78","volume":"163","author":"H Kuehne","year":"2010","unstructured":"Kuehne, H., et al.: Weakly supervised learning of actions from transcripts. CVIU 163, 78\u201389 (2010)","journal-title":"CVIU"},{"key":"8_CR29","doi-asserted-by":"crossref","unstructured":"Jin, Q., Chen, J., Chen, S., et al.: Describing videos using multi-modal fusion. In: ACM on Multimedia Conference, pp. 1087\u20131091. ACM (2016)","DOI":"10.1145\/2964284.2984065"},{"key":"8_CR30","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., et al.: Learning spatiotemporal features with 3D convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"8_CR31","doi-asserted-by":"crossref","unstructured":"Karpathy, A., et al.: Large-scale video classification with convolutional neural networks. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.223"},{"key":"8_CR32","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"628","DOI":"10.1007\/978-3-319-10602-1_41","volume-title":"Computer Vision \u2013 ECCV 2014","author":"P Bojanowski","year":"2014","unstructured":"Bojanowski, P., et al.: Weakly supervised action labeling in videos under ordering constraints. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 628\u2013643. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_41"},{"key":"8_CR33","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Inception-v4, inception-ResNet and the impact of residual connections on learning (2016)","DOI":"10.1609\/aaai.v31i1.11231"},{"issue":"4","key":"8_CR34","doi-asserted-by":"publisher","first-page":"357366","DOI":"10.1109\/TASSP.1980.1163420","volume":"28","author":"S Davis","year":"1980","unstructured":"Davis, S., Mermelstein, P.: Comparison of parametric representations for monosyllabic word recognition in continuously spoken sentences. IEEE Trans. Acoust. Speech Signal Process. 28(4), 357366 (1980)","journal-title":"IEEE Trans. Acoust. Speech Signal Process."},{"key":"8_CR35","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.-J.: BLEU: a method for automatic evaluation of machine translation. In: ACL, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"}],"container-title":["Lecture Notes in Computer Science","Advances in Multimedia Information Processing \u2013 PCM 2018"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-00764-5_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T12:07:16Z","timestamp":1710331636000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-00764-5_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"ISBN":["9783030007638","9783030007645"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-00764-5_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2018]]},"assertion":[{"value":"18 September 2018","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PCM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pacific Rim Conference on Multimedia","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hefei","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2018","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 September 2018","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 September 2018","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pcm2018","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/pcm2018.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}