{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T17:03:40Z","timestamp":1743008620630,"version":"3.40.3"},"publisher-location":"Cham","reference-count":34,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031479687"},{"type":"electronic","value":"9783031479694"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-47969-4_37","type":"book-chapter","created":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T20:02:06Z","timestamp":1701374526000},"page":"472-486","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Future Video Prediction from\u00a0a\u00a0Single Frame for\u00a0Video Anomaly Detection"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0246-4370","authenticated-orcid":false,"given":"Mohammad","family":"Baradaran","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1115-7471","authenticated-orcid":false,"given":"Robert","family":"Bergevin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,1]]},"reference":[{"key":"37_CR1","doi-asserted-by":"publisher","first-page":"88170","DOI":"10.1109\/ACCESS.2020.2993373","volume":"8","author":"F Dong","year":"2020","unstructured":"Dong, F., Zhang, Y., Nie, 
X.: Dual discriminator generative adversarial network for video anomaly detection. IEEE Access 8, 88170\u201388176 (2020)","journal-title":"IEEE Access"},{"key":"37_CR2","doi-asserted-by":"crossref","unstructured":"Liu, W., Luo, W., Lian, D., Gao, S.: Future frame prediction for anomaly detection a new baseline. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00684"},{"key":"37_CR3","doi-asserted-by":"crossref","unstructured":"Lu, Y., Kumar, K.M., Shahabeddin Nabavi, S., Wang, Y.: Future frame prediction using convolutional VRNN for anomaly detection. In: AVSS, pp. 1\u20138 (2019)","DOI":"10.1109\/AVSS.2019.8909850"},{"key":"37_CR4","unstructured":"Medel, J.R., Savakis, A.: Anomaly detection in video using predictive convolutional long short-term memory networks (2016)"},{"key":"37_CR5","doi-asserted-by":"crossref","unstructured":"Morais, R., Le, V., Tran, T., Saha, B., Mansour, M., Venkatesh, S.: Learning regularity in skeleton trajectories for anomaly detection in videos. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01227"},{"key":"37_CR6","doi-asserted-by":"crossref","unstructured":"Pan, J., et al.: Video generation from single semantic label map. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00385"},{"key":"37_CR7","unstructured":"Henderson, P., Lampert, C.H., Bickel, B.: Unsupervised video prediction from a single frame by estimating 3d dynamic scene structure. arXiv preprint arXiv:2106.09051 (2021)"},{"key":"37_CR8","unstructured":"Franceschi, J.Y., Delasalles, E., Chen, M., Lamprier, S., Gallinari, P.: Stochastic latent residual video prediction. In: Thirty-Seventh International Conference on Machine Learning, International Machine Learning Society (2020)"},{"key":"37_CR9","doi-asserted-by":"crossref","unstructured":"Hasan, M., Choi, J., Neumann, J., Roy-Chowdhury, A.K., Davis, L.S.: Learning temporal regularity in video sequences. In: CVPR, pp. 
733\u2013742 (2016)","DOI":"10.1109\/CVPR.2016.86"},{"key":"37_CR10","doi-asserted-by":"crossref","unstructured":"Leroux, S., Li, B., Simoens, P.: Multi-branch neural networks for video anomaly detection in adverse lighting and weather conditions. In: WACV (2022)","DOI":"10.1109\/WACV51458.2022.00308"},{"key":"37_CR11","doi-asserted-by":"crossref","unstructured":"Abati, D., Porrello, A., Calderara, S., Cucchiara, R.: Latent space autoregression for novelty detection. In: CVPR, pp. 481\u2013490 (2019)","DOI":"10.1109\/CVPR.2019.00057"},{"key":"37_CR12","doi-asserted-by":"crossref","unstructured":"Gong, D., et al.: Memorizing normality to detect anomaly: memory augmented deep autoencoder for unsupervised anomaly detection. In: ICCV, pp. 1705\u20131714 (2019)","DOI":"10.1109\/ICCV.2019.00179"},{"key":"37_CR13","doi-asserted-by":"crossref","unstructured":"Park, H., Noh, J., Ham, B.: Learning memory-guided normality for anomaly detection. In: CVPR, pp. 14360\u201314369 (2020)","DOI":"10.1109\/CVPR42600.2020.01438"},{"key":"37_CR14","doi-asserted-by":"crossref","unstructured":"Baradaran, M., Bergevin, R.: A critical study on the recent deep learning based semisupervised video anomaly detection methods. MTAP (2023)","DOI":"10.1007\/s11042-023-16445-z"},{"key":"37_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2020.103915","volume":"98","author":"D Chen","year":"2020","unstructured":"Chen, D., Wang, P., Yue, L., Zhang, Y., Jia, T.: Anomaly detection in surveillance video based on bidirectional prediction. Image Vis. Comput. 98, 103915 (2020)","journal-title":"Image Vis. Comput."},{"key":"37_CR16","doi-asserted-by":"crossref","unstructured":"Luo, W., Liu, W., Gao, S.: Remembering history with convolutional LSTM for anomaly detection. In: ICME, pp. 
439\u2013444 (2017)","DOI":"10.1109\/ICME.2017.8019325"},{"key":"37_CR17","doi-asserted-by":"crossref","unstructured":"Ionescu, R.T., Khan, F.S., Georgescu, M.I., Shao, L.: Object-centric auto-encoders and dummy anomalies for abnormal event detection in video. In: CVPR, pp. 7834\u20137843 (2019)","DOI":"10.1109\/CVPR.2019.00803"},{"key":"37_CR18","doi-asserted-by":"crossref","unstructured":"Georgescu, M.I., Barbalau, A., Ionescu, R.T., Khan, F.S., Popescu, M., Shah, M.: Anomaly detection in video via self supervised and multi-task learning. In: CVPR, pp. 12742\u201312752 (2021)","DOI":"10.1109\/CVPR46437.2021.01255"},{"key":"37_CR19","doi-asserted-by":"publisher","first-page":"183914","DOI":"10.1109\/ACCESS.2019.2960654","volume":"7","author":"E Duman","year":"2019","unstructured":"Duman, E., Erdem, O.A.: Anomaly detection in videos using optical flow and convolutional autoencoder. IEEE Access 7, 183914\u2013183923 (2019)","journal-title":"IEEE Access"},{"key":"37_CR20","doi-asserted-by":"crossref","unstructured":"Nguyen, K.T., Dinh, D.T., Do, M.N., Tran, M.T.: Anomaly detection in traffic surveillance videos with GAN-based future frame prediction. In: ICMR, pp. 457\u2013463 (2020)","DOI":"10.1145\/3372278.3390701"},{"key":"37_CR21","doi-asserted-by":"crossref","unstructured":"Baradaran, M., Bergevin, R.: Object class aware video anomaly detection through image translation. In: 19th CRV Conference (2022)","DOI":"10.1109\/CRV55824.2022.00020"},{"key":"37_CR22","doi-asserted-by":"crossref","unstructured":"Baradaran, M., Bergevin, R.: Multi-task learning based video anomaly detection with attention. In: CVPRW-VAND (2023)","DOI":"10.1109\/CVPRW59228.2023.00290"},{"issue":"11","key":"37_CR23","doi-asserted-by":"publisher","first-page":"7505","DOI":"10.1109\/TPAMI.2021.3129349","volume":"44","author":"W Luo","year":"2021","unstructured":"Luo, W., Liu, W., Lian, D., Gao, S.: Future frame prediction network for video anomaly detection. 
TPAMI 44(11), 7505\u20137520 (2021)","journal-title":"TPAMI"},{"key":"37_CR24","doi-asserted-by":"crossref","unstructured":"Cai, R., Zhang, H., Liu, W., Gao, S., Hao, Z.: Appearance-motion memory consistency network for video anomaly detection. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 938\u2013946 (2021)","DOI":"10.1609\/aaai.v35i2.16177"},{"key":"37_CR25","doi-asserted-by":"crossref","unstructured":"Lv, H., Chen, C., Cui, Z., Xu, C., Li, Y., Yang, J.: Learning normal dynamics in videos with meta prototype network. In: CVPR, pp. 15425\u201315434 (2021)","DOI":"10.1109\/CVPR46437.2021.01517"},{"issue":"6","key":"37_CR26","doi-asserted-by":"publisher","first-page":"2301","DOI":"10.1109\/TNNLS.2021.3083152","volume":"33","author":"X Wang","year":"2021","unstructured":"Wang, X., et al.: Robust unsupervised video anomaly detection by multipath frame prediction. IEEE Trans. Neural Netw. Learn. Syst. 33(6), 2301\u20132312 (2021)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"37_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"404","DOI":"10.1007\/978-3-031-19772-7_24","volume-title":"Computer Vision - ECCV 2022","author":"Z Yang","year":"2022","unstructured":"Yang, Z., Wu, P., Liu, J., Liu, X.: Dynamic local aggregation network with adaptive clusterer for anomaly detection. In: Avidan, S., Brostow, G., Cisse, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13664, pp. 404\u2013421. Springer, Cham (2022)"},{"key":"37_CR28","doi-asserted-by":"crossref","unstructured":"Ye, M., Peng, X., Gan, W., Wu, W., Qiao, Y.: Anopcn: video anomaly detection via deep predictive coding network. In: ACM International Conference on Multimedia. 
Association for Computing Machinery (2019)","DOI":"10.1145\/3343031.3350899"},{"key":"37_CR29","doi-asserted-by":"crossref","unstructured":"Vu, T.H., Ambellouis, S., Boonaert, J., Taleb-Ahmed, A.: Anomaly detection in surveillance videos by future appearance-motion prediction. In: Proceedings of the 15th International Joint Conference on Computer Vision, pp. 484\u2013490 (2020)","DOI":"10.5220\/0009146704840490"},{"key":"37_CR30","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"219","DOI":"10.1007\/978-3-030-68799-1_16","volume-title":"Pattern Recognition. ICPR International Workshops and Challenges","author":"PR Roy","year":"2021","unstructured":"Roy, P.R., Bilodeau, G.-A., Seoud, L.: Local anomaly detection in videos using object-centric adversarial learning. In: Del Bimbo, A., et al. (eds.) ICPR 2021. LNCS, vol. 12664, pp. 219\u2013234. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-68799-1_16"},{"key":"37_CR31","doi-asserted-by":"crossref","unstructured":"Gao, Z., et al.: SimVP: simpler yet better video prediction. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00317"},{"key":"37_CR32","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"402","DOI":"10.1007\/978-3-030-58536-5_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Teed","year":"2020","unstructured":"Teed, Z., Deng, J.: RAFT: recurrent all-pairs field transforms for optical flow. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12347, pp. 402\u2013419. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58536-5_24"},{"key":"37_CR33","doi-asserted-by":"publisher","unstructured":"Wu, Chongke, Shao, Sicong, Tunc, Cihan, Satam, Pratik, Hariri, Salim: An explainable and efficient deep learning framework for video anomaly detection. Cluster Comput., 1\u201323 (2021). 
https:\/\/doi.org\/10.1007\/s10586-021-03439-5","DOI":"10.1007\/s10586-021-03439-5"},{"key":"37_CR34","doi-asserted-by":"crossref","unstructured":"Mahadevan, V., Li, W., Bhalodia, V., Vasconcelos, N.: Anomaly detection in crowded scenes. In: CVPR (2010)","DOI":"10.1109\/CVPR.2010.5539872"}],"container-title":["Lecture Notes in Computer Science","Advances in Visual Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-47969-4_37","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T20:06:01Z","timestamp":1701374761000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-47969-4_37"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031479687","9783031479694"],"references-count":34,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-47969-4_37","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 December 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ISVC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Symposium on Visual Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lake Tahoe, NV","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"isvc2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.isvc.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"25","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"58","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"232% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"43 (oral), 15 (poster), 25 (special tracks) out of 34 submissions","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}