{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T13:39:26Z","timestamp":1742996366192,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":29,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819913534"},{"type":"electronic","value":"9789819913541"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-981-99-1354-1_18","type":"book-chapter","created":{"date-parts":[[2023,3,29]],"date-time":"2023-03-29T14:03:27Z","timestamp":1680098607000},"page":"202-218","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["AU-GAN: Attention U-Net Based on\u00a0a\u00a0Built-In Attention for\u00a0Multi-domain Image-to-Image Translation"],"prefix":"10.1007","author":[{"given":"Caie","family":"Xu","sequence":"first","affiliation":[]},{"given":"Jin","family":"Gan","sequence":"additional","affiliation":[]},{"given":"Mingyang","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Dandan","family":"Ni","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,3,30]]},"reference":[{"key":"18_CR1","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.: ImageNet classification with deep convolutional neural networks. In: NIPS, vol. 25. Curran Associates Inc. (2012)"},{"key":"18_CR2","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, NV, USA, pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"18_CR3","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv (2014)"},{"issue":"3","key":"18_CR4","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015). https:\/\/doi.org\/10.1007\/s11263-015-0816-y","journal-title":"Int. J. Comput. Vis."},{"key":"18_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Alom, M.Z., Hasan, M., Yakopcic, C., Taha, T.M., Asari, V.K.: Recurrent residual convolutional neural network based on U-Net (R2U-Net) for medical image (2018)","DOI":"10.1109\/NAECON.2018.8556686"},{"key":"18_CR7","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., et al.: Attention is all you need. arXiv (2017)"},{"issue":"1","key":"18_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1137\/19M1247620","volume":"2","author":"S G\u00fcnther","year":"2018","unstructured":"G\u00fcnther, S., Ruthotto, L., Schroder, J.B., Cyr, E.C., Gauger, N.R.: Layer-parallel training of deep residual neural networks. SIAM J. Math. Data Sci. 2(1), 1\u201323 (2018)","journal-title":"SIAM J. Math. Data Sci."},{"key":"18_CR9","unstructured":"Ming, L., Hu, X.: Recurrent convolutional neural network for object recognition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 3367\u20133375. IEEE Computer Society (2015)"},{"key":"18_CR10","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1007\/s00521-018-3627-6","volume":"32","author":"AM Zahangir","year":"2017","unstructured":"Zahangir, A.M., Mahmudul, H., Yakopcic, C., Taha, T.M., Asari, V.K.: Improved inception-residual convolutional neural network for object recognition. Neural Comput. Appl. 32, 279\u2013293 (2017). https:\/\/doi.org\/10.1007\/s00521-018-3627-6","journal-title":"Neural Comput. Appl."},{"key":"18_CR11","doi-asserted-by":"crossref","unstructured":"Donahue, J., Hendricks, L.A., Guadarrama, S., Rohrbach, M., Venugopalan, S., Saenko, K., et al.: Long-term recurrent convolutional networks for visual recognition and description. Elsevier (2015)","DOI":"10.21236\/ADA623249"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Alom, M.Z., Hasan, M., Yakopcic, C., Taha, T.M.: Inception recurrent convolutional neural network for object recognition (2017)","DOI":"10.1109\/IJCNN.2018.8489635"},{"key":"18_CR13","unstructured":"Goodfellow, I.J., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., et al.: Generative adversarial networks (2014)"},{"key":"18_CR14","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: IEEE Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2017.632"},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. IEEE (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"18_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"574","DOI":"10.1007\/978-3-030-01261-8_34","volume-title":"Computer Vision \u2013 ECCV 2018","author":"X Liang","year":"2018","unstructured":"Liang, X., Zhang, H., Lin, L., Xing, E.: Generative semantic manipulation with mask-contrasting GAN. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11217, pp. 574\u2013590. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01261-8_34"},{"key":"18_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"167","DOI":"10.1007\/978-3-030-01216-8_11","volume-title":"Computer Vision \u2013 ECCV 2018","author":"X Chen","year":"2018","unstructured":"Chen, X., Xu, C., Yang, X., Tao, D.: Attention-GAN for object transfiguration in wild images. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11206, pp. 167\u2013184. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01216-8_11"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Tang, H., Xu, D., Sebe, N., Yan, Y.: Attention-guided generative adversarial networks for unsupervised image-to-image translation. In: 2019 International Joint Conference on Neural Networks (IJCNN). IEEE (2019)","DOI":"10.1109\/IJCNN.2019.8851881"},{"key":"18_CR19","unstructured":"Zhang, H., Goodfellow, I., Metaxas, D., Odena, A.: Self-attention generative adversarial networks (2018)"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Fu, J., Liu, J., Tian, H., Li, Y., Bao, Y., Fang, Z., et al.: Dual attention network for scene segmentation (2018)","DOI":"10.1109\/CVPR.2019.00326"},{"key":"18_CR21","unstructured":"Oktay, O., Schlemper, J., Folgoc, L.L., Lee, M., Heinrich, M., Misawa, K., et al.: Attention U-Net: learning where to look for the pancreas (2018)"},{"key":"18_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Choo, J.: StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"18_CR24","unstructured":"Gulrajani, I., Ahmed, F., Arjovsky, M., Dumoulin, V., Courville, A.: Improved training of Wasserstein GANs (2017)"},{"issue":"4\/5","key":"18_CR25","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1147\/JRD.2019.2945519","volume":"63","author":"P Sattigeri","year":"2019","unstructured":"Sattigeri, P., Hoffman, S.C., Chenthamarakshan, V., Varshney, K.R.: Fairness GAN: generating datasets with fairness properties using a generative adversarial network. IBM J. Res. Dev. 63(4\/5), 1 (2019)","journal-title":"IBM J. Res. Dev."},{"key":"18_CR26","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of GANs for improved quality, stability, and variation (2017)"},{"key":"18_CR27","unstructured":"Sch\u00f6lkopf, B.: The kernel trick for distances. MIT Press (2000)"},{"key":"18_CR28","series-title":"Advances in Intelligent Systems and Computing","doi-asserted-by":"publisher","first-page":"102","DOI":"10.1007\/978-3-030-63322-6_8","volume-title":"Software Engineering Perspectives in Intelligent Systems","author":"A Obukhov","year":"2020","unstructured":"Obukhov, A., Krasnyanskiy, M.: Quality assessment method for GAN based on modified metrics inception score and Fr\u00e9chet inception distance. In: Silhavy, R., Silhavy, P., Prokopova, Z. (eds.) CoMeSySo 2020. AISC, vol. 1294, pp. 102\u2013114. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-63322-6_8"},{"key":"18_CR29","unstructured":"Barratt, S., Sharma, R.: A note on the inception score (2018)"}],"container-title":["Communications in Computer and Information Science","Web and Big Data. APWeb-WAIM 2022 International Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-1354-1_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,29]],"date-time":"2023-03-29T14:06:53Z","timestamp":1680098813000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-1354-1_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9789819913534","9789819913541"],"references-count":29,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-1354-1_18","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"30 March 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"APWeb-WAIM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Asia-Pacific Web (APWeb) and Web-Age Information Management (WAIM) Joint International Conference on Web and Big Data","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nanjing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 August 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 August 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"apwebwaim2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/apweb-waim2022.com\/proceedings","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"75","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"25% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5 Demo papers + 23 workshop papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}