{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T12:28:08Z","timestamp":1775737688000,"version":"3.50.1"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031439988","type":"print"},{"value":"9783031439995","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-43999-5_43","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:08:57Z","timestamp":1696115337000},"page":"448-458","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Unified Brain MR-Ultrasound Synthesis Using Multi-modal Hierarchical Representations"],"prefix":"10.1007","author":[{"given":"Reuben","family":"Dorent","sequence":"first","affiliation":[]},{"given":"Nazim","family":"Haouchine","sequence":"additional","affiliation":[]},{"given":"Fryderyk","family":"Kogl","sequence":"additional","affiliation":[]},{"given":"Samuel","family":"Joutard","sequence":"additional","affiliation":[]},{"given":"Parikshit","family":"Juvekar","sequence":"additional","affiliation":[]},{"given":"Erickson","family":"Torio","sequence":"additional","affiliation":[]},{"given":"Alexandra 
J.","family":"Golby","sequence":"additional","affiliation":[]},{"given":"Sebastien","family":"Ourselin","sequence":"additional","affiliation":[]},{"given":"Sarah","family":"Frisken","sequence":"additional","affiliation":[]},{"given":"Tom","family":"Vercauteren","sequence":"additional","affiliation":[]},{"given":"Tina","family":"Kapur","sequence":"additional","affiliation":[]},{"suffix":"III","given":"William M.","family":"Wells","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"43_CR1","unstructured":"Bakas, S., et al.: Identifying the best machine learning algorithms for brain tumor segmentation, progression assessment, and overall survival prediction in the brats challenge (2019)"},{"issue":"3","key":"43_CR2","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1109\/TMI.2017.2764326","volume":"37","author":"A Chartsias","year":"2017","unstructured":"Chartsias, A., Joyce, T., Giuffrida, M.V., Tsaftaris, S.A.: Multimodal mr synthesis via modality-invariant latent representation. IEEE Trans. Med. Imaging 37(3), 803\u2013814 (2017)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"43_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2019.101535","volume":"58","author":"A Chartsias","year":"2019","unstructured":"Chartsias, A., et al.: Disentangled representation learning in cardiac image analysis. Med. Image Anal. 58, 101535 (2019)","journal-title":"Med. Image Anal."},{"issue":"10","key":"43_CR4","doi-asserted-by":"publisher","first-page":"2598","DOI":"10.1109\/TMI.2022.3167808","volume":"41","author":"O Dalmaz","year":"2022","unstructured":"Dalmaz, O., Yurt, M., \u00c7ukur, T.: Resvit: residual vision transformers for multimodal medical image synthesis. IEEE Trans. Med. Imaging 41(10), 2598\u20132614 (2022). https:\/\/doi.org\/10.1109\/TMI.2022.3167808","journal-title":"IEEE Trans. Med. 
Imaging"},{"issue":"4","key":"43_CR5","doi-asserted-by":"publisher","first-page":"2503","DOI":"10.1007\/s10143-022-01778-4","volume":"45","author":"L Dixon","year":"2022","unstructured":"Dixon, L., Lim, A., Grech-Sollars, M., Nandi, D., Camp, S.: Intraoperative ultrasound in brain tumor surgery: a review and implementation guide. Neurosurg. Rev. 45(4), 2503\u20132515 (2022)","journal-title":"Neurosurg. Rev."},{"key":"43_CR6","doi-asserted-by":"crossref","unstructured":"Donnez, M., Carton, F.X., Le Lann, F., De Schlichting, E., Chabanas, M.: Realistic synthesis of brain tumor resection ultrasound images with a generative adversarial network. In: Medical Imaging 2021: Image-Guided Procedures, Robotic Interventions, and Modeling, vol. 11598, pp. 637\u2013642. SPIE (2021)","DOI":"10.1117\/12.2581911"},{"key":"43_CR7","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101862","volume":"67","author":"R Dorent","year":"2021","unstructured":"Dorent, R., et al.: Learning joint segmentation of tissues and brain lesions from task-specific hetero-modal domain-shifted datasets. Med. Image Anal. 67, 101862 (2021)","journal-title":"Med. Image Anal."},{"key":"43_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"74","DOI":"10.1007\/978-3-030-32245-8_9","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"R Dorent","year":"2019","unstructured":"Dorent, R., Joutard, S., Modat, M., Ourselin, S., Vercauteren, T.: Hetero-modal variational encoder-decoder for joint modality completion and segmentation. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11765, pp. 74\u201382. Springer, Cham (2019). 
https:\/\/doi.org\/10.1007\/978-3-030-32245-8_9"},{"key":"43_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2022.102628","volume":"83","author":"R Dorent","year":"2023","unstructured":"Dorent, R., Kujawa, A., Ivory, M., Bakas, S., Rieke, N., et al.: Crossmoda 2021 challenge: benchmark of cross-modality domain adaptation techniques for vestibular schwannoma and cochlea segmentation. Med. Image Anal. 83, 102628 (2023)","journal-title":"Med. Image Anal."},{"key":"43_CR10","doi-asserted-by":"crossref","unstructured":"Drobny, D., Vercauteren, T., Ourselin, S., Modat, M.: Registration of MRI and iUS data to compensate brain shift using a symmetric block-matching based approach. In: CuRIOUS (2018)","DOI":"10.1007\/978-3-030-01045-4_21"},{"key":"43_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"469","DOI":"10.1007\/978-3-319-46723-8_54","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2016","author":"M Havaei","year":"2016","unstructured":"Havaei, M., Guizard, N., Chapados, N., Bengio, Y.: HeMIS: hetero-modal image segmentation. In: Ourselin, S., Joskowicz, L., Sabuncu, M.R., Unal, G., Wells, W. (eds.) MICCAI 2016. LNCS, vol. 9901, pp. 469\u2013477. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46723-8_54"},{"key":"43_CR12","unstructured":"Hern\u00e1ndez-Lobato, J.M., et al.: Balancing flexibility and robustness in machine learning: semi-parametric methods and sparse linear models. Appendix C.2 (2010)"},{"key":"43_CR13","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"43_CR14","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-To-Image translation with conditional adversarial networks. 
In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.632"},{"issue":"12","key":"43_CR15","doi-asserted-by":"publisher","first-page":"4413","DOI":"10.1109\/TMI.2020.3018560","volume":"39","author":"J Jiao","year":"2020","unstructured":"Jiao, J., Namburete, A.I.L., Papageorghiou, A.T., Noble, J.A.: Self-supervised ultrasound to mri fetal brain image synthesis. IEEE Trans. Med. Imaging 39(12), 4413\u20134424 (2020). https:\/\/doi.org\/10.1109\/TMI.2020.3018560","journal-title":"IEEE Trans. Med. Imaging"},{"key":"43_CR16","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational Bayes. In: ICLR (2014)"},{"key":"43_CR17","doi-asserted-by":"crossref","unstructured":"Lee, D., Kim, J., Moon, W.J., Ye, J.C.: Collagan: collaborative gan for missing image data imputation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2487\u20132496 (2019)","DOI":"10.1109\/CVPR.2019.00259"},{"key":"43_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"795","DOI":"10.1007\/978-3-030-32251-9_87","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"H Li","year":"2019","unstructured":"Li, H., et al.: DiamondGAN: unified multi-modal generative adversarial networks for MRI sequences synthesis. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 795\u2013803. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_87"},{"key":"43_CR19","unstructured":"Maal\u00f8e, L., Fraccaro, M., Li\u00e9vin, V., Winther, O.: BIVA: a very deep hierarchy of latent variables for generative modeling. In: NeurIPS (2019)"},{"key":"43_CR20","doi-asserted-by":"crossref","unstructured":"Modat, M., et al.: Fast free-form deformation using graphics processing units. Comput. Methods Prog. Biomed. 
98, 278\u2013284 (2010)","DOI":"10.1016\/j.cmpb.2009.09.002"},{"key":"43_CR21","doi-asserted-by":"crossref","unstructured":"Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00244"},{"key":"43_CR22","unstructured":"Ranganath, R., Tran, D., Blei, D.: Hierarchical variational models. In: ICML (2016)"},{"key":"43_CR23","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: MobileNetV2: inverted residuals and linear bottlenecks. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"issue":"4","key":"43_CR24","doi-asserted-by":"publisher","first-page":"1170","DOI":"10.1109\/TMI.2019.2945521","volume":"39","author":"A Sharma","year":"2020","unstructured":"Sharma, A., Hamarneh, G.: Missing MRI pulse sequence synthesis using multi-modal generative adversarial network. IEEE Trans. Med. Imaging 39(4), 1170\u20131183 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"43_CR25","unstructured":"Shi, Y., Paige, B., Torr, P., et al.: Variational mixture-of-experts autoencoders for multi-modal deep generative models. In: NeurIPS, vol. 32 (2019)"},{"key":"43_CR26","unstructured":"S\u00f8nderby, C.K., Raiko, T., Maal\u00f8e, L., S\u00f8nderby, S.K., Winther, O.: Ladder variational autoencoders. In: NeurIPS (2016)"},{"key":"43_CR27","unstructured":"Vahdat, A., Kautz, J.: NVAE: a deep hierarchical variational autoencoder. In: NeurIPS, vol. 33 (2020)"},{"key":"43_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1007\/978-3-030-00889-5_23","volume-title":"Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support","author":"T Varsavsky","year":"2018","unstructured":"Varsavsky, T., Eaton-Rosen, Z., Sudre, C.H., Nachev, P., Cardoso, M.J.: PIMMS: permutation invariant multi-modal segmentation. In: Stoyanov, D., et al. (eds.) 
DLMIA\/ML-CDS -2018. LNCS, vol. 11045, pp. 201\u2013209. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00889-5_23"},{"issue":"1","key":"43_CR29","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1002\/acm2.13121","volume":"22","author":"T Wang","year":"2021","unstructured":"Wang, T., et al.: A review on medical imaging synthesis using deep learning and its clinical applications. J. Appl. Clin. Med. Phys. 22(1), 11\u201336 (2021)","journal-title":"J. Appl. Clin. Med. Phys."},{"key":"43_CR30","unstructured":"Wu, M., Goodman, N.: Multimodal generative models for scalable weakly-supervised learning. In: NeurIPS, vol. 31 (2018)"},{"key":"43_CR31","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"issue":"9","key":"43_CR32","doi-asserted-by":"publisher","first-page":"2772","DOI":"10.1109\/TMI.2020.2975344","volume":"39","author":"T Zhou","year":"2020","unstructured":"Zhou, T., Fu, H., Chen, G., Shen, J., Shao, L.: Hi-net: hybrid-fusion network for multi-modal mr image synthesis. IEEE Trans. Med. Imaging 39(9), 2772\u20132781 (2020)","journal-title":"IEEE Trans. Med. 
Imaging"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-43999-5_43","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,11]],"date-time":"2024-03-11T14:47:24Z","timestamp":1710168444000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-43999-5_43"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031439988","9783031439995"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-43999-5_43","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 
2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2023\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2250","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"730","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole 
number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}