{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T08:42:36Z","timestamp":1759826556903,"version":"3.40.3"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031159367"},{"type":"electronic","value":"9783031159374"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-15937-4_48","type":"book-chapter","created":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T08:15:35Z","timestamp":1662452135000},"page":"570-581","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["CNN-Transformer Hybrid Architecture for\u00a0Early Fire Detection"],"prefix":"10.1007","author":[{"given":"Chenyue","family":"Yang","sequence":"first","affiliation":[]},{"given":"Yixuan","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Yichao","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Xiaobo","family":"Lu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,7]]},"reference":[{"issue":"5","key":"48_CR1","doi-asserted-by":"publisher","first-page":"1293","DOI":"10.1007\/s10694-015-0489-7","volume":"52","author":"WS Qureshi","year":"2016","unstructured":"Qureshi, W.S., Ekpanyapong, M., Dailey, M.N., et al.: QuickBlaze: early fire detection using a combined video processing approach. Fire Technol. 52(5), 1293\u20131317 (2016)","journal-title":"Fire Technol."},{"issue":"3","key":"48_CR2","first-page":"233","volume":"6","author":"T Celik","year":"2006","unstructured":"Celik, T., Demirel, H., Ozkaramanli, H.: Automatic fire detection in video sequences. Fire Saf. J. Nurnal 6(3), 233\u2013240 (2006)","journal-title":"Fire Saf. J. Nurnal"},{"issue":"2","key":"48_CR3","doi-asserted-by":"publisher","first-page":"339","DOI":"10.1109\/TCSVT.2014.2339592","volume":"25","author":"K Dimitropoulos","year":"2015","unstructured":"Dimitropoulos, K., Barmpoutis, P., Grammalidis, N.: Spatio-temporal flame modeling and dynamic texture analysis for automatic video-based fire detection. IEEE Trans. Circ. Syst. Video Technol. 25(2), 339\u2013351 (2015)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"issue":"05","key":"48_CR4","first-page":"1470","volume":"40","author":"J Huang","year":"2020","unstructured":"Huang, J., Chaoxia, C.Y., Dong, X.Y., et al.: Faster R-CNN based color-guided flame detection. J. Comput. Appl. 40(05), 1470\u20131475 (2020)","journal-title":"J. Comput. Appl."},{"issue":"07","key":"48_CR5","first-page":"820","volume":"15","author":"FY Zhao","year":"2020","unstructured":"Zhao, F.Y., Luo, B., Lin, G.J., et al.: Flame detection based on improved YOLOv3. 
{"key":"48_CR6","doi-asserted-by":"crossref","unstructured":"Muhammad, K., Ahmad, J., Mehmood, I., et al.: Convolutional neural networks based fire detection in surveillance videos. IEEE Access 6, 18174\u201318183 (2018)","DOI":"10.1109\/ACCESS.2018.2812835"},{"issue":"02","key":"48_CR7","first-page":"49","volume":"47","author":"XL Zhang","year":"2018","unstructured":"Zhang, X.L., Hou, D.B., Zhang, C.C., et al.: Design of MPCANet fire image recognition model for deep learning. Infrared Laser Eng. 47(02), 49\u201354 (2018)","journal-title":"Infrared Laser Eng."},{"key":"48_CR8","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., He, K.: Non-local neural networks. In: Proceedings of the CVPR, pp. 7794\u20137803 (2018)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"48_CR9","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"48_CR10","unstructured":"Li, Y., Zhang, K., Cao, J., et al.: LocalViT: bringing locality to vision transformers. arXiv preprint arXiv:2104.05707 (2021)"},{"key":"48_CR11","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: Proceedings of the ICCV, pp. 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"48_CR12","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of ICCV, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"48_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"48_CR14","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, D., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. In: Proceedings of ICLR (2021)"},{"key":"48_CR15","doi-asserted-by":"crossref","unstructured":"Shahid, M., Hua, K.-L.: Fire detection using transformer network. In: Proceedings of ICMR, pp. 627\u2013630 (2021)","DOI":"10.1145\/3460426.3463665"},{"key":"48_CR16","doi-asserted-by":"publisher","unstructured":"Ghali, R., Akhloufi, M.A., Jmal, M., Souidene Mseddi, W., Attia, R.: Wildfire segmentation using deep vision transformers. Remote Sens. 13, 3527 (2021). https:\/\/doi.org\/10.3390\/rs13173527","DOI":"10.3390\/rs13173527"},{"key":"48_CR17","unstructured":"Mehta, S., Rastegari, M.: MobileViT: light-weight, general-purpose, and mobile-friendly vision transformer. arXiv preprint arXiv:2110.02178 (2021)"},{"key":"48_CR18","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.-C.: MobileNetV2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},
{"key":"48_CR19","doi-asserted-by":"crossref","unstructured":"Howard, A., et al.: Searching for MobileNetV3. In: Proceedings of the ICCV, pp. 1314\u20131324 (2019)","DOI":"10.1109\/ICCV.2019.00140"},{"key":"48_CR20","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"48_CR21","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: PVT v2: improved baselines with pyramid vision transformer. arXiv preprint arXiv:2106.13797 (2021)","DOI":"10.1007\/s41095-022-0274-8"},{"key":"48_CR22","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems (2017)"},{"key":"48_CR23","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"48_CR24","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian Error Linear Units (GELUs). arXiv preprint arXiv:1606.08415 (2016)"},{"issue":"9","key":"48_CR25","doi-asserted-by":"publisher","first-page":"1904","DOI":"10.1109\/TPAMI.2015.2389824","volume":"37","author":"K He","year":"2015","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Spatial pyramid pooling in deep convolutional networks for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 37(9), 1904\u20131916 (2015)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"48_CR26","doi-asserted-by":"crossref","unstructured":"Tan, M., Pang, R., Le, Q.V.: EfficientDet: scalable and efficient object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.01079"},{"key":"48_CR27","unstructured":"Redmon, J., Farhadi, A.: YOLOv3: an incremental improvement. arXiv preprint arXiv:1804.02767 (2018)"},{"key":"48_CR28","unstructured":"Bochkovskiy, A., Wang, C.Y., Liao, H.Y.M.: YOLOv4: optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934 (2020)"}],
"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-15937-4_48","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T08:24:50Z","timestamp":1662452690000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-15937-4_48"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031159367","9783031159374"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-15937-4_48","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"7 September 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bristol","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 September 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 September 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2022\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"561","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"255","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},
{"value":"4","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}