{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T04:10:02Z","timestamp":1742962202887,"version":"3.40.3"},"publisher-location":"Cham","reference-count":23,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031463167"},{"type":"electronic","value":"9783031463174"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-46317-4_5","type":"book-chapter","created":{"date-parts":[[2023,10,28]],"date-time":"2023-10-28T06:03:06Z","timestamp":1698472986000},"page":"48-56","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Neural Video: A Novel Framework for Interpreting the Spatiotemporal Activities of the Human Brain"],"prefix":"10.1007","author":[{"given":"Jingrui","family":"Xu","sequence":"first","affiliation":[]},{"given":"Jianpo","family":"Su","sequence":"additional","affiliation":[]},{"given":"Kai","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Zhipeng","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Ming","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Dewen","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Ling-Li","family":"Zeng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,29]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViViT: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"5_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-00689-1_1","volume-title":"Graphs in Biomedical Image Analysis and Integrating Medical Imaging and Non-Imaging Modalities","author":"S Arslan","year":"2018","unstructured":"Arslan, S., Ktena, S.I., Glocker, B., Rueckert, D.: Graph saliency maps through spectral convolutional networks: application to\u00a0sex classification with brain connectivity. In: Stoyanov, D., et al. (eds.) GRAIL\/Beyond MIC -2018. LNCS, vol. 11044, pp. 3\u201313. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00689-1_1"},{"key":"5_CR3","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"5_CR4","doi-asserted-by":"publisher","first-page":"881","DOI":"10.3389\/fnins.2020.00881","volume":"14","author":"L Fan","year":"2020","unstructured":"Fan, L., Su, J., Qin, J., Hu, D., Shen, H.: A deep network model on dynamic functional connectivity with applications to gender classification and intelligence prediction. Front. Neurosci. 14, 881 (2020)","journal-title":"Front. Neurosci."},{"issue":"2","key":"5_CR5","doi-asserted-by":"publisher","first-page":"774","DOI":"10.1016\/j.neuroimage.2012.01.021","volume":"62","author":"B Fischl","year":"2012","unstructured":"Fischl, B.: Freesurfer. Neuroimage 62(2), 774\u2013781 (2012)","journal-title":"Neuroimage"},{"issue":"14","key":"5_CR6","doi-asserted-by":"publisher","first-page":"2972","DOI":"10.1093\/cercor\/bhab394","volume":"32","author":"K Gao","year":"2022","unstructured":"Gao, K., et al.: Deep transfer learning for cerebral cortex using area-preserving geometry mapping. Cereb. Cortex 32(14), 2972\u20132984 (2022)","journal-title":"Cereb. Cortex"},{"key":"5_CR7","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1016\/j.neuroimage.2013.04.127","volume":"80","author":"MF Glasser","year":"2013","unstructured":"Glasser, M.F., et al.: The minimal preprocessing pipelines for the human connectome project. Neuroimage 80, 105\u2013124 (2013)","journal-title":"Neuroimage"},{"key":"5_CR8","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"2","key":"5_CR9","doi-asserted-by":"publisher","first-page":"142","DOI":"10.1038\/nrn730","volume":"3","author":"DJ Heeger","year":"2002","unstructured":"Heeger, D.J., Ress, D.: What does fMRI tell us about neuronal activity? Nat. Rev. Neurosci. 3(2), 142\u2013151 (2002)","journal-title":"Nat. Rev. Neurosci."},{"issue":"10s","key":"5_CR10","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3505244","volume":"54","author":"S Khan","year":"2022","unstructured":"Khan, S., Naseer, M., Hayat, M., Zamir, S.W., Khan, F.S., Shah, M.: Transformers in vision: a survey. ACM computing surveys (CSUR) 54(10s), 1\u201341 (2022)","journal-title":"ACM computing surveys (CSUR)"},{"issue":"7197","key":"5_CR11","doi-asserted-by":"publisher","first-page":"869","DOI":"10.1038\/nature06976","volume":"453","author":"NK Logothetis","year":"2008","unstructured":"Logothetis, N.K.: What we can do and what we cannot do with fMRI. Nature 453(7197), 869\u2013878 (2008). https:\/\/doi.org\/10.1038\/nature06976","journal-title":"Nature"},{"key":"5_CR12","unstructured":"Paszke, A., et al.: Automatic differentiation in pytorch (2017)"},{"key":"5_CR13","doi-asserted-by":"publisher","first-page":"418","DOI":"10.3389\/fnhum.2015.00418","volume":"9","author":"J Qin","year":"2015","unstructured":"Qin, J., et al.: Predicting individual brain maturity using dynamic functional connectivity. Front. Hum. Neurosci. 9, 418 (2015)","journal-title":"Front. Hum. Neurosci."},{"issue":"1","key":"5_CR14","doi-asserted-by":"publisher","first-page":"158","DOI":"10.1093\/cercor\/bhr099","volume":"22","author":"WR Shirer","year":"2012","unstructured":"Shirer, W.R., Ryali, S., Rykhlevskaia, E., Menon, V., Greicius, M.D.: Decoding subject-driven cognitive states with whole-brain connectivity patterns. Cereb. Cortex 22(1), 158\u2013165 (2012)","journal-title":"Cereb. Cortex"},{"key":"5_CR15","doi-asserted-by":"crossref","unstructured":"Su, Z., Zeng, W., Shi, R., Wang, Y., Sun, J., Gu, X.: Area preserving brain mapping. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2235\u20132242 (2013)","DOI":"10.1109\/CVPR.2013.290"},{"key":"5_CR16","doi-asserted-by":"crossref","unstructured":"Van Essen, D.C., et al.: The WU-Minn human connectome project: an overview. Neuroimage 80, 62\u201379 (2013)","DOI":"10.1016\/j.neuroimage.2013.05.041"},{"issue":"4","key":"5_CR17","doi-asserted-by":"publisher","first-page":"2222","DOI":"10.1016\/j.neuroimage.2012.02.018","volume":"62","author":"DC Van Essen","year":"2012","unstructured":"Van Essen, D.C., et al.: The human connectome project: a data acquisition perspective. Neuroimage 62(4), 2222\u20132231 (2012)","journal-title":"Neuroimage"},{"key":"5_CR18","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems. vol. 30 (2017)"},{"key":"5_CR19","doi-asserted-by":"publisher","unstructured":"Wightman, R.: Pytorch image models. https:\/\/github.com\/rwightman\/pytorch-image-models (2019). https:\/\/doi.org\/10.5281\/zenodo.4414861","DOI":"10.5281\/zenodo.4414861"},{"key":"5_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"169","DOI":"10.1007\/978-3-030-66843-3_17","volume-title":"Machine Learning in Clinical Neuroimaging and Radiogenomics in Neuro-oncology","author":"L-L Zeng","year":"2020","unstructured":"Zeng, L.-L., et al.: A deep transfer learning framework for 3D brain imaging based on optimal mass transport. In: Kia, S.M., et al. (eds.) MLCN\/RNO-AI -2020. LNCS, vol. 12449, pp. 169\u2013176. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-66843-3_17"},{"issue":"9","key":"5_CR21","doi-asserted-by":"publisher","first-page":"700","DOI":"10.1089\/brain.2016.0429","volume":"6","author":"C Zhang","year":"2016","unstructured":"Zhang, C., Cahill, N.D., Arbabshirani, M.R., White, T., Baum, S.A., Michael, A.M.: Sex and age effects of functional connectivity in early adulthood. Brain Connectivity 6(9), 700\u2013713 (2016)","journal-title":"Brain Connectivity"},{"issue":"4","key":"5_CR22","doi-asserted-by":"publisher","first-page":"1765","DOI":"10.1002\/hbm.23950","volume":"39","author":"C Zhang","year":"2018","unstructured":"Zhang, C., Dougherty, C.C., Baum, S.A., White, T., Michael, A.M.: Functional connectivity predicts gender: evidence for gender differences in resting brain connectivity. Hum. Brain Mapp. 39(4), 1765\u20131776 (2018)","journal-title":"Hum. Brain Mapp."},{"issue":"12","key":"5_CR23","doi-asserted-by":"publisher","first-page":"2838","DOI":"10.1109\/TVCG.2013.135","volume":"19","author":"X Zhao","year":"2013","unstructured":"Zhao, X., et al.: Area-preservation mapping using optimal mass transport. IEEE Trans. Visual Comput. Graphics 19(12), 2838\u20132847 (2013)","journal-title":"IEEE Trans. Visual Comput. Graphics"}],"container-title":["Lecture Notes in Computer Science","Image and Graphics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-46317-4_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,28]],"date-time":"2023-10-28T06:09:19Z","timestamp":1698473359000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-46317-4_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031463167","9783031463174"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-46317-4_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"29 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIG","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image and Graphics","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nanjing","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icig2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/icig2023.csig.org.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Conference Management Toolkit","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"409","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"166","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}