{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T16:22:18Z","timestamp":1775838138567,"version":"3.50.1"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031164453","type":"print"},{"value":"9783031164460","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-16446-0_50","type":"book-chapter","created":{"date-parts":[[2022,9,16]],"date-time":"2022-09-16T09:02:47Z","timestamp":1663318967000},"page":"527-537","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Classification-Aided High-Quality PET Image Synthesis via Bidirectional Contrastive GAN with Shared Information Maximization"],"prefix":"10.1007","author":[{"given":"Yuchen","family":"Fei","sequence":"first","affiliation":[]},{"given":"Chen","family":"Zu","sequence":"additional","affiliation":[]},{"given":"Zhengyang","family":"Jiao","sequence":"additional","affiliation":[]},{"given":"Xi","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Jiliu","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Dinggang","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,17]]},"reference":[
{"issue":"1","key":"50_CR1","doi-asserted-by":"publisher","first-page":"110","DOI":"10.1002\/ana.24546","volume":"79","author":"KA Johnson","year":"2016","unstructured":"Johnson, K.A., Schultz, A., Betensky, R.A., et al.: Tau positron emission tomographic imaging in aging and early Alzheimer disease. Ann. Neurol. 79(1), 110\u2013119 (2016)","journal-title":"Ann. Neurol."},
{"key":"50_CR2","doi-asserted-by":"crossref","unstructured":"Daerr, S., Brendel, M., Zach, C., et al.: Evaluation of early-phase [18 F]-florbetaben PET acquisition in clinical routine cases. NeuroImage Clin. 14, 77\u201386 (2017)","DOI":"10.1016\/j.nicl.2016.10.005"},
{"issue":"1","key":"50_CR3","first-page":"166","volume":"251","author":"B Huang","year":"2009","unstructured":"Huang, B., Law, M.W.M., Khong, P.L.: Whole-body PET\/CT scanning: estimation of radiation dose and cancer risk. Med. Phys. 251(1), 166\u2013174 (2009)","journal-title":"Med. Phys."},
{"issue":"2","key":"50_CR4","doi-asserted-by":"publisher","first-page":"791","DOI":"10.1088\/0031-9155\/61\/2\/791","volume":"61","author":"Y Wang","year":"2016","unstructured":"Wang, Y., Zhang, P., Ma, G., et al.: Predicting standard-dose PET image from low-dose PET and multimodal MR images using mapping-based sparse representation. Phys. Med. Biol. 61(2), 791\u2013812 (2016)","journal-title":"Phys. Med. Biol."},
{"issue":"9","key":"50_CR5","doi-asserted-by":"publisher","first-page":"5301","DOI":"10.1118\/1.4928400","volume":"42","author":"J Kang","year":"2015","unstructured":"Kang, J., Gao, Y., Shi, F., et al.: Prediction of standard-dose brain PET image by using MRI and low-dose brain [18F] FDG PET images. Med. Phys. 42(9), 5301\u20135309 (2015)","journal-title":"Med. Phys."},
{"issue":"3","key":"50_CR6","doi-asserted-by":"publisher","first-page":"569","DOI":"10.1109\/TBME.2016.2564440","volume":"64","author":"Y Wang","year":"2016","unstructured":"Wang, Y., Ma, G., An, L., et al.: Semi-supervised tripled dictionary learning for standard-dose PET image prediction using low-dose PET and multimodal MRI. IEEE Trans. Biomed. Eng. 64(3), 569\u2013579 (2016)","journal-title":"IEEE Trans. Biomed. Eng."},
{"key":"50_CR7","doi-asserted-by":"publisher","first-page":"102339","DOI":"10.1016\/j.media.2021.102339","volume":"77","author":"B Zhan","year":"2022","unstructured":"Zhan, B., Xiao, J., Cao, C., et al.: Multi-constraint generative adversarial network for dose prediction in radiotherapy. Med. Image Anal. 77, 102339 (2022)","journal-title":"Med. Image Anal."},
{"key":"50_CR8","doi-asserted-by":"crossref","unstructured":"Xiang, L., Wang, Q., Nie, D., et al.: Deep embedding convolutional neural network for synthesizing CT image from T1-Weighted MR image. Med. Image Anal. 47, 31\u201344 (2018)","DOI":"10.1016\/j.media.2018.03.011"},
{"key":"50_CR9","doi-asserted-by":"publisher","first-page":"108215","DOI":"10.1016\/j.knosys.2022.108215","volume":"241","author":"P Tang","year":"2022","unstructured":"Tang, P., Yang, P., et al.: Unified medical image segmentation by learning from uncertainty in an end-to-end manner. Knowl.-Based Syst. 241, 108215 (2022)","journal-title":"Knowl.-Based Syst."},
{"key":"50_CR10","doi-asserted-by":"publisher","first-page":"108566","DOI":"10.1016\/j.patcog.2022.108566","volume":"126","author":"Y Shi","year":"2022","unstructured":"Shi, Y., Zu, C., Hong, M., et al.: ASMFS: adaptive-similarity-based multi-modality feature selection for classification of Alzheimer\u2019s disease. Pattern Recogn. 126, 108566 (2022)","journal-title":"Pattern Recogn."},
{"key":"50_CR11","doi-asserted-by":"publisher","first-page":"108021","DOI":"10.1016\/j.knosys.2021.108021","volume":"239","author":"L Hu","year":"2022","unstructured":"Hu, L., Li, J., Peng, X., et al.: Semi-supervised NPC segmentation with uncertainty and attention guided consistency. Knowl.-Based Syst. 239, 108021 (2022)","journal-title":"Knowl.-Based Syst."},
{"key":"50_CR12","doi-asserted-by":"publisher","first-page":"550","DOI":"10.1016\/j.neuroimage.2018.03.045","volume":"174","author":"Y Wang","year":"2018","unstructured":"Wang, Y., Yu, B., Wang, L., et al.: 3D conditional generative adversarial networks for high-quality PET image estimation at low dose. Neuroimage 174, 550\u2013562 (2018)","journal-title":"Neuroimage"},
{"key":"50_CR13","doi-asserted-by":"publisher","first-page":"108324","DOI":"10.1016\/j.knosys.2022.108324","volume":"241","author":"H Li","year":"2022","unstructured":"Li, H., Peng, X., Zeng, J., et al.: Explainable attention guided adversarial deep network for 3D radiotherapy dose distribution prediction. Knowl.-Based Syst. 241, 108324 (2022)","journal-title":"Knowl.-Based Syst."},
{"key":"50_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"43","DOI":"10.1007\/978-3-319-67564-0_5","volume-title":"Molecular Imaging, Reconstruction and Analysis of Moving Body Organs, and Stroke Imaging and Treatment","author":"L Bi","year":"2017","unstructured":"Bi, L., Kim, J., Kumar, A., Feng, D., Fulham, M.: Synthesis of positron emission tomography (PET) images via multi-channel generative adversarial networks (GANs). In: Cardoso, M.J. (ed.) CMMI\/SWITCH\/RAMBO -2017. LNCS, vol. 10555, pp. 43\u201351. Springer, Cham (2017). https:\/\/doi.org\/10.1007\/978-3-319-67564-0_5"},
{"issue":"6","key":"50_CR15","doi-asserted-by":"publisher","first-page":"1328","DOI":"10.1109\/TMI.2018.2884053","volume":"38","author":"Y Wang","year":"2019","unstructured":"Wang, Y., Zhou, L., Yu, B., et al.: 3D auto-context-based locality adaptive multi-modality GANs for PET synthesis. IEEE Trans. Med. Imaging 38(6), 1328\u20131339 (2019)","journal-title":"IEEE Trans. Med. Imaging"},
{"issue":"21","key":"50_CR16","doi-asserted-by":"publisher","first-page":"215017","DOI":"10.1088\/1361-6560\/ab4891","volume":"64","author":"Y Lei","year":"2019","unstructured":"Lei, Y., Dong, X., Wang, T., et al.: Whole-body PET estimation from low count statistics using cycle-consistent generative adversarial networks. Phys. Med. Biol. 64(21), 215017 (2019)","journal-title":"Phys. Med. Biol."},
{"key":"50_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"276","DOI":"10.1007\/978-3-030-87231-1_27","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"Y Luo","year":"2021","unstructured":"Luo, Y., et al.: 3D transformer-GAN for high-quality PET reconstruction. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12906, pp. 276\u2013285. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87231-1_27"},
{"key":"50_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"516","DOI":"10.1007\/978-3-030-59713-9_50","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2020","author":"X Xie","year":"2020","unstructured":"Xie, X., Chen, J., Li, Y., Shen, L., Ma, K., Zheng, Y.: MI$^2$GAN: generative adversarial network for medical image domain adaptation using mutual information constraint. In: Martel, A.L., Abolmaesumi, P., Stoyanov, D., Mateus, D., Zuluaga, M.A., Zhou, S.K., Racoceanu, D., Joskowicz, L. (eds.) MICCAI 2020. LNCS, vol. 12262, pp. 516\u2013525. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59713-9_50"},
{"key":"50_CR19","doi-asserted-by":"publisher","first-page":"102447","DOI":"10.1016\/j.media.2022.102447","volume":"79","author":"K Wang","year":"2022","unstructured":"Wang, K., Zhan, B., Zu, C., et al.: Semi-supervised medical image segmentation via a tripled-uncertainty guided mean teacher model with contrastive learning. Med. Image Anal. 79, 102447 (2022)","journal-title":"Med. Image Anal."},
{"key":"50_CR20","doi-asserted-by":"crossref","unstructured":"Hadsell, R., Chopra, S., LeCun, Y.: Dimensionality reduction by learning an invariant mapping. In: IEEE Computer Society Conference on Computer Vision and Pattern Recognition, New York, pp. 1735\u20131742. IEEE (2006)","DOI":"10.1109\/CVPR.2006.100"},
{"key":"50_CR21","doi-asserted-by":"crossref","unstructured":"Xie, E., Ding, J., Wang, W., et al.: DetCo: unsupervised contrastive learning for object detection. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8392\u20138401. IEEE (2021)","DOI":"10.1109\/ICCV48922.2021.00828"},
{"key":"50_CR22","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., et al: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9726\u20139735. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},
{"key":"50_CR23","doi-asserted-by":"crossref","unstructured":"Zeng, J., Xie, P.: Contrastive self-supervised learning for graph classification. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, no. 12, pp. 10824\u201310832. AAAI (2021)","DOI":"10.1609\/aaai.v35i12.17293"},
{"key":"50_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"457","DOI":"10.1007\/978-3-030-32248-9_51","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"S Chen","year":"2019","unstructured":"Chen, S., Bortsova, G., Garc\u00eda-Uceda Ju\u00e1rez, A., van Tulder, G., de Bruijne, M.: Multi-task attention-based semi-supervised learning for medical image segmentation. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11766, pp. 457\u2013465. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32248-9_51"},
{"key":"50_CR25","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., et al.: Image-to-image translation with conditional adversarial networks. In: IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Honolulu, pp. 1125\u20131134. IEEE (2017)","DOI":"10.1109\/CVPR.2017.632"},
{"issue":"12","key":"50_CR26","doi-asserted-by":"publisher","first-page":"2720","DOI":"10.1109\/TBME.2018.2814538","volume":"65","author":"D Nie","year":"2018","unstructured":"Nie, D., Trullo, R., Lian, J., et al.: Medical image synthesis with deep convolutional adversarial networks. IEEE Trans. Biomed. Eng. 65(12), 2720\u20132730 (2018)","journal-title":"IEEE Trans. Biomed. Eng."},
{"issue":"7","key":"50_CR27","doi-asserted-by":"publisher","first-page":"1750","DOI":"10.1109\/TMI.2019.2895894","volume":"38","author":"B Yu","year":"2019","unstructured":"Yu, B., Zhou, L., Wang, L., et al.: Ea-gans: edge-aware generative adversarial networks for cross-modality mr image synthesis. IEEE Trans. Med. Imaging 38(7), 1750\u20131762 (2019)","journal-title":"IEEE Trans. Med. Imaging"},
{"key":"50_CR28","doi-asserted-by":"publisher","first-page":"102335","DOI":"10.1016\/j.media.2021.102335","volume":"77","author":"Y Luo","year":"2021","unstructured":"Luo, Y., Zhou, L., Zhan, B., et al.: Adaptive rectification based adversarial network with spectrum constraint for high-quality PET image synthesis. Med. Image Anal. 77, 102335 (2021)","journal-title":"Med. Image Anal."}],
"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-16446-0_50","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,4]],"date-time":"2024-10-04T00:13:28Z","timestamp":1728000808000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-16446-0_50"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031164453","9783031164460"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-16446-0_50","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[
{"value":"17 September 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},
{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Singapore","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Singapore","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"18 September 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"22 September 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"miccai2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"https:\/\/conferences.miccai.org\/2022\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},
{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"Microsoft Conference","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"1831","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"574","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"31% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}