{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T02:17:51Z","timestamp":1774059471900,"version":"3.50.1"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198380","type":"print"},{"value":"9783031198397","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19839-7_17","type":"book-chapter","created":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T11:40:06Z","timestamp":1666438806000},"page":"285-300","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["AutoTransition: Learning to\u00a0Recommend Video Transition Effects"],"prefix":"10.1007","author":[{"given":"Yaojie","family":"Shen","sequence":"first","affiliation":[]},{"given":"Libo","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Kai","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xiaojie","family":"Jin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,23]]},"reference":[{"key":"17_CR1","unstructured":"Frey, N., Chi, P., Yang, W., Essa, I.: Automatic non-linear video editing transfer. arXiv preprint arXiv:2105.06988 (2021)"},{"issue":"6","key":"17_CR2","first-page":"1","volume":"38","author":"M Wang","year":"2019","unstructured":"Wang, M., Yang, G.W., Hu, S.M., Yau, S.T., Shamir, A.: Write-a-video: computational video montage from themed text. ACM Trans. Graph. 38(6), 1\u2013177 (2019)","journal-title":"ACM Trans. Graph."},{"key":"17_CR3","doi-asserted-by":"crossref","unstructured":"Koorathota, S., Adelman, P., Cotton, K., Sajda, P.: Editing like humans: a contextual, multimodal framework for automated video editing. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), IEEE Computer Society, pp. 1701\u20131709 (2021)","DOI":"10.1109\/CVPRW53098.2021.00186"},{"issue":"4","key":"17_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2766966","volume":"34","author":"Z Liao","year":"2015","unstructured":"Liao, Z., Yu, Y., Gong, B., Cheng, L.: Audeosynth: music-driven video montage. ACM Trans. Graph. (TOG) 34(4), 1\u201310 (2015)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Pardo, A., Caba, F., Alc\u00e1zar, J.L., Thabet, A.K., Ghanem, B.: Learning to cut by watching movies. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6858\u20136868 (2021)","DOI":"10.1109\/ICCV48922.2021.00678"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Hendricks, L.A., Wang, O., Shechtman, E., Sivic, J., Darrell, T., Russell, B.: Localizing moments in video with natural language. In: 2017 IEEE International Conference on Computer Vision (ICCV), IEEE Computer Society, pp. 
5804\u20135813 (2017)","DOI":"10.1109\/ICCV.2017.618"},{"key":"17_CR7","unstructured":"Akbari, H., et al.: Vatt: transformers for multimodal self-supervised learning from raw video, audio and text. In: Advances in Neural Information Processing Systems (2021)"},{"issue":"4","key":"17_CR8","doi-asserted-by":"publisher","first-page":"664","DOI":"10.1109\/TPAMI.2016.2598339","volume":"39","author":"A Karpathy","year":"2017","unstructured":"Karpathy, A., Fei-Fei, L.: Deep visual-semantic alignments for generating image descriptions. IEEE Trans. Pattern Anal. Mach. Intell. 39(4), 664\u2013676 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"17_CR9","doi-asserted-by":"crossref","unstructured":"Miech, A., Zhukov, D., Alayrac, J.B., Tapaswi, M., Laptev, I., Sivic, J.: Howto100m: learning a text-video embedding by watching hundred million narrated video clips. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 2630\u20132640. IEEE (2019)","DOI":"10.1109\/ICCV.2019.00272"},{"key":"17_CR10","unstructured":"Escorcia, V., Soldan, M., Sivic, J., Ghanem, B., Russell, B.: Temporal localization of moments in video collections with natural language. arXiv preprint arXiv:1907.12763 (2019)"},{"key":"17_CR11","unstructured":"Schultz, M., Joachims, T.: Learning a distance metric from relative comparisons. In: Advances in Neural Information Processing Systems, vol. 16 (2003)"},{"key":"17_CR12","unstructured":"Faghri, F., Fleet, D.J., Kiros, J.R., Fidler, S.: Vse++: improving visual-semantic embeddings with hard negatives. arXiv preprint arXiv:1707.05612 (2017)"},{"key":"17_CR13","doi-asserted-by":"crossref","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: a unified embedding for face recognition and clustering. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE Computer Society pp. 815\u2013823 (2015)","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"17_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1007\/978-3-319-24261-3_7","volume-title":"Similarity-Based Pattern Recognition","author":"E Hoffer","year":"2015","unstructured":"Hoffer, E., Ailon, N.: Deep metric learning using triplet network. In: Feragen, A., Pelillo, M., Loog, M. (eds.) SIMBAD 2015. LNCS, vol. 9370, pp. 84\u201392. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24261-3_7"},{"key":"17_CR15","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in neural information processing systems, vol. 30 (2017)"},{"key":"17_CR16","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: Vivit: a video vision transformer. arXiv preprint arXiv:2103.15691 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"17_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"214","DOI":"10.1007\/978-3-030-58548-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"V Gabeur","year":"2020","unstructured":"Gabeur, V., Sun, C., Alahari, K., Schmid, C.: Multi-modal transformer for video retrieval. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12349, pp. 214\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58548-8_13"},{"key":"17_CR18","unstructured":"Lin, T., Wang, Y., Liu, X., Qiu, X.: A survey of transformers. 
arXiv preprint arXiv:2106.04554 (2021)"},{"key":"17_CR19","doi-asserted-by":"publisher","first-page":"570","DOI":"10.1162\/tacl_a_00385","volume":"9","author":"LA Hendricks","year":"2021","unstructured":"Hendricks, L.A., Mellor, J., Schneider, R., Alayrac, J.B., Nematzadeh, A.: Decoupling the role of data, attention, and losses in multimodal transformers. Trans. Assoc. Comput. Linguist. 9, 570\u2013585 (2021)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"17_CR20","doi-asserted-by":"crossref","unstructured":"Lin, X., Bertasius, G., Wang, J., Chang, S.F., Parikh, D., Torresani, L.: Vx2text: end-to-end learning of video-based text generation from multimodal inputs. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), IEEE Computer Society, pp. 7001\u20137011 (2021)","DOI":"10.1109\/CVPR46437.2021.00693"},{"key":"17_CR21","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: Slowfast networks for video recognition. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), IEEE Computer Society, pp. 6201\u20136210 (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"17_CR22","doi-asserted-by":"crossref","unstructured":"Won, M., Chun, S., Nieto, O., Serrc, X.: Data-driven harmonic filters for audio representation learning. In: ICASSP 2020\u20132020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 536\u2013540. IEEE (2020)","DOI":"10.1109\/ICASSP40776.2020.9053669"},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Lei, J., Wang, L., Shen, Y., Yu, D., Berg, T.L., Bansal, M.: Mart: memory-augmented recurrent transformer for coherent video paragraph captioning. arXiv preprint arXiv:2005.05402 (2020)","DOI":"10.18653\/v1\/2020.acl-main.233"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19839-7_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T12:22:58Z","timestamp":1709814178000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19839-7_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198380","9783031198397"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19839-7_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"23 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference 
Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}