{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:35:21Z","timestamp":1772908521332,"version":"3.50.1"},"publisher-location":"Cham","reference-count":31,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030586096","type":"print"},{"value":"9783030586102","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58610-2_38","type":"book-chapter","created":{"date-parts":[[2020,10,6]],"date-time":"2020-10-06T13:02:49Z","timestamp":1601989369000},"page":"645-660","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":181,"title":["Video Super-Resolution with Recurrent Structure-Detail Network"],"prefix":"10.1007","author":[{"given":"Takashi","family":"Isobe","sequence":"first","affiliation":[]},{"given":"Xu","family":"Jia","sequence":"additional","affiliation":[]},{"given":"Shuhang","family":"Gu","sequence":"additional","affiliation":[]},{"given":"Songjiang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Shengjin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Qi","family":"Tian","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,10,7]]},"reference":[{"key":"38_CR1","doi-asserted-by":"crossref","unstructured":"Caballero, J., et 
al.: Real-time video super-resolution with spatio-temporal networks and motion compensation. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.304"},{"key":"38_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"184","DOI":"10.1007\/978-3-319-10593-2_13","volume-title":"Computer Vision \u2013 ECCV 2014","author":"C Dong","year":"2014","unstructured":"Dong, C., Loy, C.C., He, K., Tang, X.: Learning a deep convolutional network for image super-resolution. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8692, pp. 184\u2013199. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10593-2_13"},{"key":"38_CR3","doi-asserted-by":"crossref","unstructured":"Du, W., Wang, Y., Qiao, Y.: RPAN: an end-to-end recurrent pose-attention network for action recognition in videos. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.402"},{"key":"38_CR4","doi-asserted-by":"crossref","unstructured":"Fuoli, D., Gu, S., Timofte, R.: Efficient video super-resolution through recurrent latent space propagation. CoRR abs\/1909.08080 (2019)","DOI":"10.1109\/ICCVW.2019.00431"},{"key":"38_CR5","unstructured":"Glorot, X., Bordes, A., Bengio, Y.: Deep sparse rectifier neural networks. In: AISTATS (2011)"},{"key":"38_CR6","doi-asserted-by":"crossref","unstructured":"Haris, M., Shakhnarovich, G., Ukita, N.: Deep back-projection networks for super-resolution. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00179"},{"key":"38_CR7","doi-asserted-by":"crossref","unstructured":"Haris, M., Shakhnarovich, G., Ukita, N.: Recurrent back-projection network for video super-resolution. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00402"},{"key":"38_CR8","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. 
In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"38_CR9","unstructured":"Huang, Y., Wang, W., Wang, L.: Bidirectional recurrent convolutional networks for multi-frame super-resolution. In: NeurIPS (2015)"},{"key":"38_CR10","doi-asserted-by":"crossref","unstructured":"Isobe, T., et al.: Video super-resolution with temporal group attention. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00803"},{"key":"38_CR11","unstructured":"Jia, X., De Brabandere, B., Tuytelaars, T., Gool, L.V.: Dynamic filter networks. In: NeurIPS (2016)"},{"key":"38_CR12","doi-asserted-by":"crossref","unstructured":"Jo, Y., Wug Oh, S., Kang, J., Joo Kim, S.: Deep video super-resolution network using dynamic upsampling filters without explicit motion compensation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00340"},{"issue":"2","key":"38_CR13","doi-asserted-by":"publisher","first-page":"109","DOI":"10.1109\/TCI.2016.2532323","volume":"2","author":"A Kappeler","year":"2016","unstructured":"Kappeler, A., Yoo, S., Dai, Q., Katsaggelos, A.K.: Video super-resolution with convolutional neural networks. IEEE Trans. Comput. Imaging 2(2), 109\u2013122 (2016)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"38_CR14","doi-asserted-by":"crossref","unstructured":"Kim, J., Lee, J.K., Lee, K.M.: Accurate image super-resolution using very deep convolutional networks. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.182"},{"key":"38_CR15","doi-asserted-by":"crossref","unstructured":"Kim, J., Lee, J.K., Lee, K.M.: Deeply-recursive convolutional network for image super-resolution. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.181"},{"key":"38_CR16","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. 
In: ICLR (2015)"},{"issue":"11","key":"38_CR17","doi-asserted-by":"publisher","first-page":"2599","DOI":"10.1109\/TPAMI.2018.2865304","volume":"41","author":"WS Lai","year":"2018","unstructured":"Lai, W.S., Huang, J.B., Ahuja, N., Yang, M.H.: Fast and accurate image super-resolution with deep laplacian pyramid networks. IEEE Trans. Pattern Anal. Mach. Intell. 41(11), 2599\u20132613 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"38_CR18","doi-asserted-by":"crossref","unstructured":"Ledig, C., et al.: Photo-realistic single image super-resolution using a generative adversarial network. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.19"},{"key":"38_CR19","doi-asserted-by":"crossref","unstructured":"Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: CVPR Workshops (2017)","DOI":"10.1109\/CVPRW.2017.151"},{"issue":"2","key":"38_CR20","doi-asserted-by":"publisher","first-page":"346","DOI":"10.1109\/TPAMI.2013.127","volume":"36","author":"C Liu","year":"2013","unstructured":"Liu, C., Sun, D.: On bayesian adaptive video super resolution. IEEE Trans. Pattern Anal. Mach. Intell. 36(2), 346\u2013360 (2013)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"38_CR21","doi-asserted-by":"crossref","unstructured":"Liu, D., et al.: Robust video super-resolution with learned temporal dynamics. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.274"},{"key":"38_CR22","doi-asserted-by":"crossref","unstructured":"Pan, J., et al.: Learning dual convolutional neural networks for low-level vision. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00324"},{"key":"38_CR23","doi-asserted-by":"crossref","unstructured":"Sajjadi, M.S., Vemulapalli, R., Brown, M.: Frame-recurrent video super-resolution. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00693"},{"key":"38_CR24","doi-asserted-by":"crossref","unstructured":"Singh, B., Marks, T.K., Jones, M., Tuzel, O., Shao, M.: A multi-stream bi-directional recurrent neural network for fine-grained action detection. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.216"},{"key":"38_CR25","doi-asserted-by":"crossref","unstructured":"Tao, X., Gao, H., Liao, R., Wang, J., Jia, J.: Detail-revealing deep video super-resolution. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.479"},{"key":"38_CR26","doi-asserted-by":"crossref","unstructured":"Wang, X., Chan, K.C., Yu, K., Dong, C., Change Loy, C.: EDVR: Video restoration with enhanced deformable convolutional networks. In: CVPR Workshops (2019)","DOI":"10.1109\/CVPRW.2019.00247"},{"issue":"8","key":"38_CR27","doi-asserted-by":"publisher","first-page":"1106","DOI":"10.1007\/s11263-018-01144-2","volume":"127","author":"T Xue","year":"2019","unstructured":"Xue, T., Chen, B., Wu, J., Wei, D., Freeman, W.T.: Video enhancement with task-oriented flow. Int. J. Comput. Vis. 127(8), 1106\u20131125 (2019)","journal-title":"Int. J. Comput. Vis."},{"issue":"12","key":"38_CR28","doi-asserted-by":"publisher","first-page":"3106","DOI":"10.1109\/TMM.2019.2919431","volume":"21","author":"W Yang","year":"2019","unstructured":"Yang, W., Zhang, X., Tian, Y., Wang, W., Xue, J.H., Liao, Q.: Deep learning for single image super-resolution: a brief review. IEEE Trans. Multimed. 21(12), 3106\u20133121 (2019)","journal-title":"IEEE Trans. Multimed."},{"key":"38_CR29","doi-asserted-by":"crossref","unstructured":"Yi, P., Wang, Z., Jiang, K., Jiang, J., Ma, J.: Progressive fusion video super-resolution network via exploiting non-local spatio-temporal correlations. 
In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00320"},{"key":"38_CR30","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"294","DOI":"10.1007\/978-3-030-01234-2_18","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Zhang","year":"2018","unstructured":"Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 294\u2013310. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_18"},{"key":"38_CR31","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00262"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58610-2_38","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,6]],"date-time":"2024-10-06T00:39:49Z","timestamp":1728175189000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58610-2_38"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030586096","9783030586102"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58610-2_38","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"7 October 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference 
Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic. 
From the ECCV Workshops 249 full papers, 18 short papers, and 21 further contributions were published out of a total of 467 submissions.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}