{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:12:41Z","timestamp":1776885161552,"version":"3.51.2"},"publisher-location":"Cham","reference-count":66,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_25","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"422-439","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":29,"title":["Panoramic Vision Transformer for\u00a0Saliency Detection in\u00a0360$$^\\circ $$ Videos"],"prefix":"10.1007","author":[{"given":"Heeseung","family":"Yun","sequence":"first","affiliation":[]},{"given":"Sehun","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Gunhee","family":"Kim","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"25_CR1","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Vision-and-language navigation: interpreting visually-grounded navigation instructions in real environments. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00387"},{"key":"25_CR2","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML (2021)"},{"key":"25_CR3","doi-asserted-by":"crossref","unstructured":"Borji, A., Tavakoli, H.R., Sihite, D.N., Itti, L.: Analysis of scores, datasets, and models in visual saliency prediction. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.118"},{"key":"25_CR4","unstructured":"Bruce, N., Tsotsos, J.: Saliency based on information maximization. In: NIPS (2005)"},{"key":"25_CR5","unstructured":"Bylinskii, Z., et al.: MIT saliency benchmark (2015)"},{"key":"25_CR6","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1109\/TPAMI.2018.2815601","volume":"41","author":"Z Bylinskii","year":"2018","unstructured":"Bylinskii, Z., Judd, T., Oliva, A., Torralba, A., Durand, F.: What do different evaluation metrics tell us about saliency models? IEEE TPAMI 41, 740\u2013757 (2018)","journal-title":"IEEE TPAMI"},{"key":"25_CR7","doi-asserted-by":"crossref","unstructured":"Caron, G., Morbidi, F.: Spherical visual gyroscope for autonomous robots using the mixture of photometric potentials. In: ICRA (2018)","DOI":"10.1109\/ICRA.2018.8460761"},{"key":"25_CR8","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"25_CR9","doi-asserted-by":"crossref","unstructured":"Caruso, D., Engel, J., Cremers, D.: Large-scale direct SLAM for omnidirectional cameras. In: IROS (2015)","DOI":"10.1109\/IROS.2015.7353366"},{"key":"25_CR10","doi-asserted-by":"crossref","unstructured":"Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"25_CR11","doi-asserted-by":"crossref","unstructured":"Cheng, H.T., Chao, C.H., Dong, J.D., Wen, H.K., Liu, T.L., Sun, M.: Cube padding for weakly-supervised saliency prediction in 360 videos. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00154"},{"key":"25_CR12","doi-asserted-by":"crossref","unstructured":"Chou, S.H., Chen, Y.C., Zeng, K.H., Hu, H.N., Fu, J., Sun, M.: Self-view grounding given a narrated 360 video. In: AAAI (2018)","DOI":"10.1609\/aaai.v32i1.12289"},{"key":"25_CR13","unstructured":"Cohen, T.S., Geiger, M., K\u00f6hler, J., Welling, M.: Spherical CNNs. In: ICLR (2018)"},{"key":"25_CR14","doi-asserted-by":"crossref","unstructured":"Dai, J., et al.: Deformable convolutional networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.89"},{"key":"25_CR15","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"25_CR16","unstructured":"Devaraju, B.: Understanding filtering on the sphere: experiences from filtering GRACE data. Ph.D. dissertation, Inst. Geodesy, Univ. Stuttgart (2015)"},{"key":"25_CR17","unstructured":"Dosovitskiy, A., et al.: An image is worth $$16\\times 16$$ words: transformers for image recognition at scale. arXiv:2010.11929 (2020)"},{"key":"25_CR18","doi-asserted-by":"crossref","unstructured":"Eder, M., Shvets, M., Lim, J., Frahm, J.M.: Tangent images for mitigating spherical distortion. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01244"},{"key":"25_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"54","DOI":"10.1007\/978-3-030-01261-8_4","volume-title":"Computer Vision \u2013 ECCV 2018","author":"C Esteves","year":"2018","unstructured":"Esteves, C., Allen-Blanchette, C., Makadia, A., Daniilidis, K.: Learning SO(3) equivariant representations with spherical CNNs. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11217, pp. 54\u201370. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01261-8_4"},{"key":"25_CR20","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"25_CR21","doi-asserted-by":"crossref","unstructured":"Gao, W., et al.: Token semantic coupled attention map for weakly supervised object localization. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00288"},{"key":"25_CR22","first-page":"21","volume":"6","author":"N Greene","year":"1986","unstructured":"Greene, N.: Environment mapping and other applications of world projections. IEEE CGA 6, 21\u201329 (1986)","journal-title":"IEEE CGA"},{"key":"25_CR23","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (GELUs). arXiv:1606.08415 (2016)"},{"key":"25_CR24","doi-asserted-by":"crossref","unstructured":"Hu, H.N., Lin, Y.C., Liu, M.Y., Cheng, H.T., Chang, Y.J., Sun, M.: Deep 360 pilot: learning a deep agent for piloting through 360 sports videos. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.153"},{"key":"25_CR25","doi-asserted-by":"publisher","first-page":"1254","DOI":"10.1109\/34.730558","volume":"20","author":"L Itti","year":"1998","unstructured":"Itti, L., Koch, C., Niebur, E.: A model of saliency-based visual attention for rapid scene analysis. IEEE TPAMI 20, 1254\u20131259 (1998)","journal-title":"IEEE TPAMI"},{"key":"25_CR26","unstructured":"Jiang, C.M., Huang, J., Kashinath, K., Marcus, P., Niessner, M., et al.: Spherical CNNs on unstructured grids. In: ICLR (2018)"},{"key":"25_CR27","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. In: ICLR (2015)"},{"key":"25_CR28","doi-asserted-by":"crossref","unstructured":"Lee, S., Sung, J., Yu, Y., Kim, G.: A memory network approach for story-based temporal summarization of 360 videos. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00153"},{"key":"25_CR29","doi-asserted-by":"crossref","unstructured":"Lee, Y., Jeong, J., Yun, J., Cho, W., Yoon, K.J.: SpherePHD: applying CNNs on a spherical PolyHeDron representation of 360deg images. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00940"},{"key":"25_CR30","doi-asserted-by":"crossref","unstructured":"Li, C., Xu, M., Du, X., Wang, Z.: Bridge the gap between VQA and human behavior on omnidirectional video: a large-scale dataset and a deep learning model. In: ACMMM (2018)","DOI":"10.1145\/3240508.3240581"},{"key":"25_CR31","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"25_CR32","doi-asserted-by":"crossref","unstructured":"Meng, M., Zhang, T., Tian, Q., Zhang, Y., Wu, F.: Foreground activation maps for weakly supervised object localization. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00337"},{"key":"25_CR33","unstructured":"Pan, J., et al.: SalGAN: visual saliency prediction with generative adversarial networks. arXiv:1701.01081 (2017)"},{"key":"25_CR34","doi-asserted-by":"crossref","unstructured":"Riche, N., Duvinage, M., Mancas, M., Gosselin, B., Dutoit, T.: Saliency and human fixations: state-of-the-art and study of comparison metrics. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.147"},{"key":"25_CR35","doi-asserted-by":"crossref","unstructured":"Seo, H.J., Milanfar, P.: Nonparametric bottom-up saliency detection by self-resemblance. In: CVPRw (2009)","DOI":"10.1109\/CVPRW.2009.5204207"},{"key":"25_CR36","unstructured":"Sim\u00e9oni, O., et al.: Localizing objects with self-supervised transformers and no labels. In: BMVC (2021)"},{"key":"25_CR37","unstructured":"Su, Y.C., Grauman, K.: Learning spherical convolution for fast features from 360 imagery. In: NIPS (2017)"},{"key":"25_CR38","doi-asserted-by":"crossref","unstructured":"Su, Y.C., Grauman, K.: Kernel transformer networks for compact spherical convolution. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00967"},{"key":"25_CR39","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"154","DOI":"10.1007\/978-3-319-54190-7_10","volume-title":"Computer Vision \u2013 ACCV 2016","author":"Y-C Su","year":"2017","unstructured":"Su, Y.-C., Jayaraman, D., Grauman, K.: Pano2Vid: automatic cinematography for watching 360$$^{\\circ }$$\u00a0videos. In: Lai, S.-H., Lepetit, V., Nishino, K., Sato, Y. (eds.) ACCV 2016. LNCS, vol. 10114, pp. 154\u2013171. Springer, Cham (2017). https:\/\/doi.org\/10.1007\/978-3-319-54190-7_10"},{"key":"25_CR40","doi-asserted-by":"crossref","unstructured":"Sun, Y., Lu, A., Yu, L.: Weighted-to-spherically-uniform quality evaluation for omnidirectional video. SPL (2017)","DOI":"10.1109\/LSP.2017.2720693"},{"key":"25_CR41","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: ICML (2021)"},{"key":"25_CR42","doi-asserted-by":"crossref","unstructured":"Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., J\u00e9gou, H.: Going deeper with image transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"25_CR43","unstructured":"Ullah, I., et al.: A brief survey of visual saliency detection. MTA (2020)"},{"key":"25_CR44","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NIPS (2017)"},{"key":"25_CR45","doi-asserted-by":"crossref","unstructured":"Wang, M., Konrad, J., Ishwar, P., Jing, K., Rowley, H.: Image saliency: from intrinsic to extrinsic context. In: CVPR (2011)","DOI":"10.1109\/CVPR.2011.5995743"},{"key":"25_CR46","first-page":"4185","volume":"24","author":"W Wang","year":"2015","unstructured":"Wang, W., Shen, J., Shao, L.: Consistent video saliency using local gradient flow optimization and global refinement. TIP 24, 4185\u20134196 (2015)","journal-title":"TIP"},{"key":"25_CR47","doi-asserted-by":"crossref","unstructured":"Wang, Y., Shen, X., Hu, S., Yuan, Y., Crowley, J., Vaufreydaz, D.: Self-supervised transformers for unsupervised object discovery using normalized cut. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01414"},{"key":"25_CR48","first-page":"600","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. TIP 13, 600\u2013612 (2004)","journal-title":"TIP"},{"key":"25_CR49","doi-asserted-by":"crossref","unstructured":"Weinzaepfel, P., Revaud, J., Harchaoui, Z., Schmid, C.: DeepFlow: large displacement optical flow with deep matching. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.175"},{"key":"25_CR50","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P.: SegFormer: simple and efficient design for semantic segmentation with transformers. In: NeurIPS (2021)"},{"key":"25_CR51","doi-asserted-by":"crossref","unstructured":"Xie, J., Luo, C., Zhu, X., Jin, Z., Lu, W., Shen, L.: Online refinement of low-level feature based activation map for weakly supervised object localization. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00020"},{"key":"25_CR52","doi-asserted-by":"crossref","unstructured":"Yogamani, S., et al.: WoodScape: a multi-task, multi-camera fisheye dataset for autonomous driving. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00940"},{"key":"25_CR53","doi-asserted-by":"crossref","unstructured":"Yu, M., Lakshman, H., Girod, B.: A framework to evaluate omnidirectional video coding schemes. In: ISMAR (2015)","DOI":"10.1109\/ISMAR.2015.12"},{"key":"25_CR54","doi-asserted-by":"crossref","unstructured":"Yu, Y., Lee, S., Na, J., Kang, J., Kim, G.: A deep ranking model for spatio-temporal highlight detection from a 360$$^\\circ $$ video. In: AAAI (2018)","DOI":"10.1609\/aaai.v32i1.12335"},{"key":"25_CR55","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-token ViT: training vision transformers from scratch on ImageNet. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"25_CR56","doi-asserted-by":"crossref","unstructured":"Yun, H., Yu, Y., Yang, W., Lee, K., Kim, G.: Pano-AVQA: grounded audio-visual question answering on 360deg videos. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00204"},{"key":"25_CR57","doi-asserted-by":"crossref","unstructured":"Yun, I., Lee, H.J., Rhee, C.E.: Improving 360 monocular depth estimation via non-local dense prediction transformer and joint supervised and self-supervised learning. In: AAAI (2022)","DOI":"10.1609\/aaai.v36i3.20231"},{"key":"25_CR58","unstructured":"Zeng, Y., Zhuge, Y., Lu, H., Zhang, L.: Joint learning of saliency detection and weakly supervised semantic segmentation. In: ICCV (2019)"},{"key":"25_CR59","doi-asserted-by":"crossref","unstructured":"Zeng, Y., Zhuge, Y., Lu, H., Zhang, L., Qian, M., Yu, Y.: Multi-source weak supervision for saliency detection. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00623"},{"key":"25_CR60","doi-asserted-by":"crossref","unstructured":"Zhang, C., Liwicki, S., Smith, W., Cipolla, R.: Orientation-aware semantic segmentation on icosahedron spheres. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00363"},{"key":"25_CR61","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: VidTr: video transformer without convolutions. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01332"},{"key":"25_CR62","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1007\/978-3-030-01234-2_30","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Z Zhang","year":"2018","unstructured":"Zhang, Z., Xu, Y., Yu, J., Gao, S.: Saliency detection in 360$$^\\circ $$ videos. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 504\u2013520. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_30"},{"key":"25_CR63","doi-asserted-by":"crossref","unstructured":"Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01595"},{"key":"25_CR64","doi-asserted-by":"crossref","unstructured":"Zheng, S., et al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"25_CR65","doi-asserted-by":"crossref","unstructured":"Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.319"},{"key":"25_CR66","doi-asserted-by":"crossref","unstructured":"Zhu, F., Zhu, Y., Chang, X., Liang, X.: Vision-language navigation with self-supervised auxiliary reasoning tasks. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01003"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_25","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T15:40:12Z","timestamp":1673278812000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_25"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":66,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_25","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
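The record above is a standard Crossref REST API work response: a {"status", "message-type", "message"} envelope whose "message" object holds the chapter metadata. As a minimal sketch of how such a record can be fetched and read, assuming network access and the third-party `requests` package; the contact address passed via `mailto` is a placeholder for opting into Crossref's "polite" request pool:

```python
import requests

# Crossref works endpoint: https://api.crossref.org/works/{DOI}
# The DOI below is taken from the record above.
DOI = "10.1007/978-3-031-19833-5_25"
url = f"https://api.crossref.org/works/{DOI}"

# 'mailto' identifies the caller to Crossref (replace with a real address).
resp = requests.get(url, params={"mailto": "you@example.com"}, timeout=30)
resp.raise_for_status()

record = resp.json()
assert record["message-type"] == "work"
work = record["message"]  # the chapter metadata shown above

print(work["title"][0])                # chapter title
print(work["container-title"])         # series and proceedings titles
print(work["is-referenced-by-count"])  # citation count at indexing time
for author in work["author"]:
    print(author["given"], author["family"])
print(len(work.get("reference", [])))  # 66 reference entries
```

Note that fields such as "reference" and "assertion" are optional in Crossref records, hence the defensive `get` when counting references.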