{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T21:18:07Z","timestamp":1743110287328,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":22,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819717101"},{"type":"electronic","value":"9789819717118"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-97-1711-8_18","type":"book-chapter","created":{"date-parts":[[2024,3,27]],"date-time":"2024-03-27T19:03:26Z","timestamp":1711566206000},"page":"238-249","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Facial Nerve Disorder Rehabilitation via Generative Adversarial Network"],"prefix":"10.1007","author":[{"given":"Donald Jasper","family":"Su","sequence":"first","affiliation":[]},{"given":"Chia Cheng","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Fang","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,3,28]]},"reference":[{"key":"18_CR1","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Kim, S., Choo, J.: Stargan: unified generative adversarial networks for multi-domain image-to-image translation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8789\u20138797 (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"18_CR2","unstructured":"Csurka, G.: Domain adaptation for visual applications: a comprehensive survey. arXiv preprint arXiv:1702.05374 (2017)"},{"key":"18_CR3","doi-asserted-by":"publisher","first-page":"2007","DOI":"10.1007\/s11063-019-10163-0","volume":"51","author":"O Elharrouss","year":"2020","unstructured":"Elharrouss, O., Almaadeed, N., Al-Maadeed, S., Akbari, Y.: Image inpainting: a review. Neural Process. Lett. 51, 2007\u20132028 (2020)","journal-title":"Neural Process. Lett."},{"issue":"11","key":"18_CR4","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., et al.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020)","journal-title":"Commun. ACM"},{"key":"18_CR5","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"63","DOI":"10.1007\/978-3-030-14815-7_6","volume-title":"Integrated Uncertainty in Knowledge Modelling and Decision Making","author":"GM Guanoluisa","year":"2019","unstructured":"Guanoluisa, G.M., Pilatasig, J.A., Andaluz, V.H.: Gy medic: analysis and rehabilitation system for patients with facial paralysis. In: Seki, H., Nguyen, C.H., Huynh, V.-N., Inuiguchi, M. (eds.) IUKM 2019. LNCS (LNAI), vol. 11471, pp. 63\u201375. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-14815-7_6"},{"key":"18_CR6","first-page":"1","volume":"30","author":"I Gulrajani","year":"2017","unstructured":"Gulrajani, I., Ahmed, F., Arjovsky, M., Dumoulin, V., Courville, A.C.: Improved training of wasserstein gans. Adv. Neural Inf. Process. Syst. 30, 1\u201311 (2017)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Gupta, R.K., Chia, A.Y.S., Rajan, D., Ng, E.S., Zhiyong, H.: Image colorization using similar images. In: Proceedings of the 20th ACM International Conference on Multimedia, pp. 369\u2013 378 (2012)","DOI":"10.1145\/2393347.2393402"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Jin, Y., Li, Z., Yi, P.: Review of methods applying on facial alignment. In: 2022 IEEE 2nd International Conference on Electronic Technology, Communication and Information (ICETCI). pp. 553\u2013557. IEEE (2022)","DOI":"10.1109\/ICETCI55101.2022.9832267"},{"key":"18_CR9","doi-asserted-by":"publisher","first-page":"235","DOI":"10.1007\/s12194-019-00520-y","volume":"12","author":"S Kaji","year":"2019","unstructured":"Kaji, S., Kida, S.: Overview of image-to-image translation by use of deep neural networks: denoising, super-resolution, modality conversion, and reconstruction in medical imaging. Radiol. Phys. Technol. 12, 235\u2013248 (2019)","journal-title":"Radiol. Phys. Technol."},{"issue":"7","key":"18_CR10","doi-asserted-by":"publisher","first-page":"2253","DOI":"10.3390\/app10072253","volume":"10","author":"HW Kim","year":"2020","unstructured":"Kim, H.W., Kim, H.J., Rho, S., Hwang, E.: Augmented emtcnn: a fast and accurate facial landmark detection network. Appl. Sci. 10(7), 2253 (2020)","journal-title":"Appl. Sci."},{"key":"18_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2020.107343","volume":"105","author":"R Li","year":"2020","unstructured":"Li, R., Cao, W., Jiao, Q., Wu, S., Wong, H.S.: Simplified unsupervised image translation for semantic segmentation adaptation. Pattern Recogn. 105, 107343 (2020)","journal-title":"Pattern Recogn."},{"key":"18_CR12","unstructured":"Lugaresi, C., et al.: Mediapipe: a framework for building perception pipelines. arXiv preprint arXiv:1906.08172 (2019)"},{"issue":"5","key":"18_CR13","doi-asserted-by":"publisher","first-page":"1044","DOI":"10.1002\/lary.26356","volume":"127","author":"RE Luijmes","year":"2017","unstructured":"Luijmes, R.E., Pouwels, S., Beurskens, C.H., Kleiss, I.J., Siemann, I., Ingels, K.J.: Quality of life before and after different treatment modalities in peripheral facial palsy: a systematic review. Laryngoscope 127(5), 1044\u20131051 (2017)","journal-title":"Laryngoscope"},{"issue":"1","key":"18_CR14","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1109\/TMECH.2016.2618771","volume":"22","author":"W Meng","year":"2016","unstructured":"Meng, W., Xie, S.Q., Liu, Q., Lu, C.Z., Ai, Q.: Robust iterative feedback tuning control of a compliant rehabilitation robot for repetitive ankle training. IEEE\/ASME Trans. Mechatron. 22(1), 173\u2013184 (2016)","journal-title":"IEEE\/ASME Trans. Mechatron."},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Nayak, S., Das, R.K.: Application of artificial intelligence (ai) in prosthetic and orthotic rehabilitation. In: Service Robotics. IntechOpen (2020)","DOI":"10.5772\/intechopen.93903"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Pumarola, A., Agudo, A., Martinez, A.M., Sanfeliu, A., Moreno-Noguer, F.: Ganimation: Anatomically-aware facial animation from a single image. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 818\u2013833 (2018)","DOI":"10.1007\/978-3-030-01249-6_50"},{"issue":"3","key":"18_CR17","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1016\/S0385-8146(99)00049-8","volume":"27","author":"Y Satoh","year":"2000","unstructured":"Satoh, Y., Kanzaki, J., Yoshihara, S.: A comparison and conversion table of \u2018the house\u2013 brackmann facial nerve grading system\u2019and \u2018the yanagihara grading system.\u2019 Auris Nasus Larynx 27(3), 207\u2013212 (2000)","journal-title":"Auris Nasus Larynx"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Thies, J., Zollhofer, M., Stamminger, M., Theobalt, C., Nie\u00dfner, M.: Face2face: Real-time face capture and reenactment of RGB videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2387\u20132395 (2016)","DOI":"10.1109\/CVPR.2016.262"},{"key":"18_CR19","doi-asserted-by":"crossref","unstructured":"Wu, Y., Gou, C., Ji, Q.: Simultaneous facial landmark detection, pose and deformation estimation under facial occlusion. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3471\u20133480 (2017)","DOI":"10.1109\/CVPR.2017.606"},{"issue":"1","key":"18_CR20","doi-asserted-by":"publisher","first-page":"118","DOI":"10.1097\/00129492-200301000-00023","volume":"24","author":"TL Yen","year":"2003","unstructured":"Yen, T.L., Driscoll, C.L., Lalwani, A.K.: Significance of house-brackmann facial nerve grading global score in the setting of differential facial nerve function. Otol. Neurotol. 24(1), 118\u2013122 (2003)","journal-title":"Otol. Neurotol."},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycleconsistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"18_CR22","doi-asserted-by":"crossref","unstructured":"Zhu, X., Lei, Z., Yan, J., Yi, D., Li, S.Z.: High-fidelity pose and expression normalization for face recognition in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 787\u2013796 (2015)","DOI":"10.1109\/CVPR.2015.7298679"}],"container-title":["Communications in Computer and Information Science","Technologies and Applications of Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-1711-8_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T04:00:43Z","timestamp":1731643243000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-1711-8_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9789819717101","9789819717118"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-1711-8_18","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"28 March 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"TAAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Technologies and Applications of Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Yunlin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Taiwan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 December 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"taai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/taai2023.org.tw","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Our build submission system","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"193","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"35","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"12","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"18% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}