{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T06:29:51Z","timestamp":1743056991594,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":24,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819985517"},{"type":"electronic","value":"9789819985524"}],"license":[{"start":{"date-parts":[[2023,12,28]],"date-time":"2023-12-28T00:00:00Z","timestamp":1703721600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,28]],"date-time":"2023-12-28T00:00:00Z","timestamp":1703721600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8552-4_38","type":"book-chapter","created":{"date-parts":[[2023,12,27]],"date-time":"2023-12-27T07:02:36Z","timestamp":1703660556000},"page":"482-493","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Discriminative Activation of\u00a0Information Is What You Need in\u00a0Image Super-Resolution Transformer"],"prefix":"10.1007","author":[{"given":"Yixin","family":"Qian","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,28]]},"reference":[{"key":"38_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. 
In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"38_CR2","doi-asserted-by":"crossref","unstructured":"Chen, X., Wang, X., Zhou, J., Dong, C.: Activating more pixels in image super-resolution transformer. arXiv e-prints (2022)","DOI":"10.1109\/CVPR52729.2023.02142"},{"key":"38_CR3","doi-asserted-by":"crossref","unstructured":"Dai, T., Cai, J., Zhang, Y., Xia, S.T., Zhang, L.: Second-order attention network for single image super-resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11065\u201311074 (2019)","DOI":"10.1109\/CVPR.2019.01132"},{"issue":"2","key":"38_CR4","doi-asserted-by":"publisher","first-page":"295","DOI":"10.1109\/TPAMI.2015.2439281","volume":"38","author":"C Dong","year":"2016","unstructured":"Dong, C., Loy, C.C., He, K., Tang, X.: Image super-resolution using deep convolutional networks. IEEE Trans. Pattern Anal. Mach. Intell. 38(2), 295\u2013307 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"38_CR5","unstructured":"Dosovitskiy, A., et al.: An image is worth 16 $$\\times $$ 16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"38_CR6","doi-asserted-by":"crossref","unstructured":"Kim, J., Lee, J.K., Lee, K.M.: Accurate image super-resolution using very deep convolutional networks. IEEE (2016)","DOI":"10.1109\/CVPR.2016.182"},{"key":"38_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"527","DOI":"10.1007\/978-3-030-01237-3_32","volume-title":"Computer Vision \u2013 ECCV 2018","author":"J Li","year":"2018","unstructured":"Li, J., Fang, F., Mei, K., Zhang, G.: Multi-scale residual network for image super-resolution. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11212, pp. 527\u2013542. 
Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01237-3_32"},{"key":"38_CR8","unstructured":"Li, J., Pei, Z., Zeng, T.: From beginner to master: a survey for deep learning-based single-image super-resolution. arXiv preprint arXiv:2109.14335 (2021)"},{"key":"38_CR9","doi-asserted-by":"crossref","unstructured":"Li, K., et al.: UniFormer: unifying convolution and self-attention for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 45, 12581\u201312600 (2023)","DOI":"10.1109\/TPAMI.2023.3282631"},{"key":"38_CR10","unstructured":"Li, W., Lu, X., Lu, J., Zhang, X., Jia, J.: On efficient transformer and image pre-training for low-level vision. arXiv e-prints (2021)"},{"key":"38_CR11","doi-asserted-by":"crossref","unstructured":"Liang, J., Zeng, H., Zhang, L.: Details or artifacts: a locally discriminative learning approach to realistic image super-resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5657\u20135666 (2022)","DOI":"10.1109\/CVPR52688.2022.00557"},{"key":"38_CR12","doi-asserted-by":"crossref","unstructured":"Liang, J., Cao, J., Sun, G., Zhang, K., Timofte, R.: SwinIR: image restoration using swin transformer. IEEE (2021)","DOI":"10.1109\/ICCVW54120.2021.00210"},{"key":"38_CR13","doi-asserted-by":"crossref","unstructured":"Lim, B., Son, S., Kim, H., Nah, S., Lee, K.M.: Enhanced deep residual networks for single image super-resolution. IEEE (2017)","DOI":"10.1109\/CVPRW.2017.151"},{"key":"38_CR14","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"38_CR15","unstructured":"Vaswani, A., et al.: Attention is all you need. 
In: Advances in Neural Information Processing Systems 30 (2017)"},{"issue":"5","key":"38_CR16","doi-asserted-by":"publisher","first-page":"1259","DOI":"10.1109\/TCSVT.2018.2839879","volume":"29","author":"Y Wang","year":"2018","unstructured":"Wang, Y., Wang, L., Wang, H., Li, P.: Resolution-aware network for image super-resolution. IEEE Trans. Circuits Syst. Video Technol. 29(5), 1259\u20131269 (2018)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"38_CR17","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: CvT: introducing convolutions to vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 22\u201331 (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"38_CR18","first-page":"30392","volume":"34","author":"T Xiao","year":"2021","unstructured":"Xiao, T., Singh, M., Mintun, E., Darrell, T., Doll\u00e1r, P., Girshick, R.: Early convolutions help transformers see better. Adv. Neural. Inf. Process. Syst. 34, 30392\u201330400 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"38_CR19","doi-asserted-by":"crossref","unstructured":"Yang, F., Yang, H., Fu, J., Lu, H., Guo, B.: Learning texture transformer network for image super-resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5791\u20135800 (2020)","DOI":"10.1109\/CVPR42600.2020.00583"},{"key":"38_CR20","doi-asserted-by":"crossref","unstructured":"Yuan, K., Guo, S., Liu, Z., Zhou, A., Yu, F., Wu, W.: Incorporating convolution designs into visual transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 579\u2013588 (2021)","DOI":"10.1109\/ICCV48922.2021.00062"},{"key":"38_CR21","unstructured":"Zewei, H., Siliang, T., Jiangxin, Y., Yanlong, C., Ying, Y.M., Yanpeng, C.: Cascaded deep networks with multiple receptive fields for infrared image super-resolution. IEEE Trans. Circ. Syst. Video Technol. 1 (2018)"},{"key":"38_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"294","DOI":"10.1007\/978-3-030-01234-2_18","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Zhang","year":"2018","unstructured":"Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 294\u2013310. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_18"},{"key":"38_CR23","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2472\u20132481 (2018)","DOI":"10.1109\/CVPR.2018.00262"},{"key":"38_CR24","unstructured":"Zhao, Y., Wang, G., Tang, C., Luo, C., Zeng, W., Zha, Z.J.: A battle of network structures: an empirical study of CNN, transformer, and MLP. 
arXiv preprint arXiv:2108.13002 (2021)"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8552-4_38","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,27]],"date-time":"2023-12-27T07:19:02Z","timestamp":1703661542000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8552-4_38"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,28]]},"ISBN":["9789819985517","9789819985524"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8552-4_38","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023,12,28]]},"assertion":[{"value":"28 December 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Xiamen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 October 
2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/prcv2023.xmu.edu.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1420","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"532","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"37% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.78","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.69","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}