{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T22:47:07Z","timestamp":1765234027224,"version":"build-2065373602"},"reference-count":52,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"STI 2030-Major Projects","award":["2022ZD0205300"],"award-info":[{"award-number":["2022ZD0205300"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tpami.2025.3599860","type":"journal-article","created":{"date-parts":[[2025,8,19]],"date-time":"2025-08-19T18:15:55Z","timestamp":1755627355000},"page":"11802-11816","source":"Crossref","is-referenced-by-count":2,"title":["MinD-3D++: Advancing fMRI-Based 3D Reconstruction With High-Quality Textured Mesh Generation and a Comprehensive Dataset"],"prefix":"10.1109","volume":"47","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-7905-1517","authenticated-orcid":false,"given":"Jianxiong","family":"Gao","sequence":"first","affiliation":[{"name":"Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6595-6893","authenticated-orcid":false,"given":"Yanwei","family":"Fu","sequence":"additional","affiliation":[{"name":"Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0412-5500","authenticated-orcid":false,"given":"Yuqian","family":"Fu","sequence":"additional","affiliation":[{"name":"Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9431-4590","authenticated-orcid":false,"given":"Yun","family":"Wang","sequence":"additional","affiliation":[{"name":"Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8049-7288","authenticated-orcid":false,"given":"Xuelin","family":"Qian","sequence":"additional","affiliation":[{"name":"Fudan University, Shanghai, China"}]},{"given":"Jianfeng","family":"Feng","sequence":"additional","affiliation":[{"name":"Fudan University, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02175"},{"article-title":"Cinematic mindscapes: High-quality video reconstruction from brain activity","year":"2023","author":"Chen","key":"ref2"},{"article-title":"Reconstructing the mind\u2019s eye: FMRI-to-image with contrastive learning and diffusion priors","year":"2023","author":"Scotti","key":"ref3"},{"article-title":"fMRI-PTE: A large-scale fMRI pretrained transformer encoder for multi-subject brain activity decoding","year":"2023","author":"Qian","key":"ref4"},{"article-title":"Semantic neural decoding via cross-modal generation","year":"2023","author":"Qian","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01103"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuroimage.2016.12.039"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuroimage.2023.119909"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuron.2018.12.014"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1093\/oso\/9780190070557.003.0011"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1098\/rstb.2021.0455"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1523\/JNEUROSCI.4753-08.2009"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmhi.2015.02.001"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_18"},{"article-title":"Shapenet: An information-rich 3D model repository","year":"2015","author":"Chang","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01263"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1038\/s41593-021-00962-x"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1038\/ncomms15037"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1093\/cercor\/bhx268"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pcbi.1006633"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.3389\/fncom.2019.00021"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.eng.2019.03.010"},{"key":"ref24","first-page":"12332","article-title":"Contrast, attend and diffuse to decode high-resolution images from brain activities","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sun"},{"key":"ref25","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ho"},{"article-title":"Denoising diffusion implicit models","year":"2020","author":"Song","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00387"},{"article-title":"Pushing auto-regressive models for 3D shape generation at capacity and scalability","year":"2024","author":"Qian","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73235-5_1"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73220-1_22"},{"article-title":"Dreamgaussian: Generative gaussian splatting for efficient 3D content creation","year":"2023","author":"Tang","key":"ref31"},{"issue":"4","key":"ref32","doi-asserted-by":"crossref","DOI":"10.1145\/3592433","article-title":"3D gaussian splatting for real-time radiance field rendering","volume":"42","author":"Kerbl","year":"2023","journal-title":"ACM Trans. Graph."},{"article-title":"Imagedream: Image-prompt multi-view diffusion for 3D generation","year":"2023","author":"Wang","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72897-6_15"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20062-5_6"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00270"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1038\/s41597-019-0052-3"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00609"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093430"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01335"},{"article-title":"Scalable 3D captioning with pretrained models","year":"2023","author":"Luo","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-018-0235-4"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-018-0235-4"},{"issue":"2","key":"ref45","first-page":"3","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","volume":"1","author":"Hu"},{"article-title":"Instantmesh: Efficient 3D mesh generation from a single image with sparse-view large reconstruction models","year":"2024","author":"Xu","key":"ref46"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN55064.2022.9892673"},{"article-title":"Unibrain: Unify image reconstruction and captioning all in one diffusion model from human brain activity","year":"2023","author":"Mai","key":"ref48"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01737"},{"key":"ref50","first-page":"492","article-title":"DISN: Deep implicit surface network for high-quality single-view 3D reconstruction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Xu"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11230086\/11130422.pdf?arnumber=11130422","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T18:53:12Z","timestamp":1762455192000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11130422\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":52,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3599860","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"type":"print","value":"0162-8828"},{"type":"electronic","value":"2160-9292"},{"type":"electronic","value":"1939-3539"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}