{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T14:30:56Z","timestamp":1762353056028},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2021,5,20]],"date-time":"2021-05-20T00:00:00Z","timestamp":1621468800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,5,20]],"date-time":"2021-05-20T00:00:00Z","timestamp":1621468800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61862060"],"award-info":[{"award-number":["61862060"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2022,1]]},"DOI":"10.1007\/s10489-021-02496-y","type":"journal-article","created":{"date-parts":[[2021,5,20]],"date-time":"2021-05-20T08:12:03Z","timestamp":1621498323000},"page":"1362-1375","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":25,"title":["DECA: a novel multi-scale efficient channel attention module for object detection in real-life fire images"],"prefix":"10.1007","volume":"52","author":[{"given":"Junjie","family":"Wang","sequence":"first","affiliation":[]},{"given":"Jiong","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Zhu","family":"He","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,5,20]]},"reference":[{"key":"2496_CR1","doi-asserted-by":"crossref","unstructured":"Lin T-Y, Maire M, Belongie S, Hays J, Perona P, Ramanan D, Doll\u00e1r P, Zitnick CL (2014) Microsoft 
coco: common objects in context. In: European conference on computer vision, pp 740\u2013755. Springer","DOI":"10.1007\/978-3-319-10602-1_48"},{"issue":"2","key":"2496_CR2","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham M, Gool LV, Williams CKI, Winn J, Zisserman A (2010) The pascal visual object classes (voc) challenge. International Journal of Computer Vision 88(2):303\u2013338","journal-title":"International Journal of Computer Vision"},{"issue":"6","key":"2496_CR3","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2017) Imagenet classification with deep convolutional neural networks. Commun ACM 60(6):84\u201390","journal-title":"Commun ACM"},{"key":"2496_CR4","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. arXiv:1409.1556"},{"key":"2496_CR5","doi-asserted-by":"crossref","unstructured":"Szegedy C, Liu W, Jia Y, Sermanet P, Reed S, Anguelov D, Erhan D, Vanhoucke V, Rabinovich A (2015) Going deeper with convolutions. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1\u20139","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"2496_CR6","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"2496_CR7","doi-asserted-by":"crossref","unstructured":"Huang G, Liu Z, Maaten LVD, Weinberger KQ (2017) Densely connected convolutional networks. 
In: proceedings of the IEEE conference on computer vision and pattern recognition, pp 4700\u20134708","DOI":"10.1109\/CVPR.2017.243"},{"key":"2496_CR8","doi-asserted-by":"crossref","unstructured":"Li Z, Peng C, Yu G, Zhang X, Deng Y, Sun J (2018) Detnet: a backbone network for object detection. arXiv:1804.06215","DOI":"10.1007\/978-3-030-01240-3_21"},{"key":"2496_CR9","doi-asserted-by":"crossref","unstructured":"Shixiao Wu, Zhang L (2018) Using popular object detection methods for real time forest fire detection. In: 2018 11th international symposium on computational intelligence and design (ISCID), vol 1. IEEE, pp 280\u2013284","DOI":"10.1109\/ISCID.2018.00070"},{"key":"2496_CR10","doi-asserted-by":"crossref","unstructured":"Zhaa X, Ji H, Zhang D, Bao H (2018) Fire smoke detection based on contextual object detection. In: 2018 IEEE 3rd international conference on image, vision and computing (ICIVC). IEEE, pp 473\u2013476","DOI":"10.1109\/ICIVC.2018.8492823"},{"key":"2496_CR11","doi-asserted-by":"crossref","unstructured":"Chen K, Cheng Y, Bai H, Mou C, Zhang Y (2019) Research on image fire detection based on support vector machine. In: 2019 9th international conference on fire science and fire protection engineering (ICFSFPE). IEEE, pp 1\u20137","DOI":"10.1109\/ICFSFPE48751.2019.9055795"},{"key":"2496_CR12","unstructured":"Gaia (2021) D-fire: an image dataset of fire and smoke occurrences designed for machine learning and object recognition algorithms with more than 10000 images. https:\/\/github.com\/gaiasd\/DFireDataset"},{"key":"2496_CR13","doi-asserted-by":"crossref","unstructured":"Girshick R (2015) Fast r-cnn. In: Proceedings of the IEEE international conference on computer vision, pp 1440\u20131448","DOI":"10.1109\/ICCV.2015.169"},{"key":"2496_CR14","unstructured":"Ren S, He K, Girshick R, Sun J (2015) Faster r-cnn: towards real-time object detection with region proposal networks. 
In: Advances in neural information processing systems, pp 91\u201399"},{"key":"2496_CR15","doi-asserted-by":"crossref","unstructured":"Zhang H, Chang H, Ma B, Wang N, Chen X (2020) Dynamic r-cnn: towards high quality object detection via dynamic training. arXiv:2004.06002","DOI":"10.1007\/978-3-030-58555-6_16"},{"key":"2496_CR16","doi-asserted-by":"crossref","unstructured":"Li Y, Chen Y, Wang N, Zhang Z (2019) Scale-aware trident networks for object detection. In: Proceedings of the IEEE international conference on computer vision, pp 6054\u20136063","DOI":"10.1109\/ICCV.2019.00615"},{"key":"2496_CR17","doi-asserted-by":"crossref","unstructured":"Qiao S, Chen L-C, Yuille A (2020) Detectors: detecting objects with recursive feature pyramid and switchable atrous convolution. arXiv:2006.02334","DOI":"10.1109\/CVPR46437.2021.01008"},{"key":"2496_CR18","doi-asserted-by":"crossref","unstructured":"Lin T-Y, Goyal P, Girshick R, He K, Doll\u00e1r P (2017) Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision, pp 2980\u20132988","DOI":"10.1109\/ICCV.2017.324"},{"key":"2496_CR19","doi-asserted-by":"crossref","unstructured":"Liu W, Anguelov D, Erhan D, Szegedy C, Reed S, Fu C-Y, Berg AC (2016) Ssd: single shot multibox detector. In: European conference on computer vision, pp 21\u201337. Springer","DOI":"10.1007\/978-3-319-46448-0_2"},{"key":"2496_CR20","doi-asserted-by":"crossref","unstructured":"Redmon J, Divvala S, Girshick R, Farhadi A (2016) You only look once: unified, real-time object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 779\u2013788","DOI":"10.1109\/CVPR.2016.91"},{"key":"2496_CR21","doi-asserted-by":"crossref","unstructured":"Zhang S, Chi C, Yao Y, Lei Z, Li SZ (2020) Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 9759\u20139768","DOI":"10.1109\/CVPR42600.2020.00978"},{"key":"2496_CR22","unstructured":"Liu S, Huang D, Wang Y (2019) Learning spatial fusion for single-shot object detection. arXiv:1911.09516"},{"key":"2496_CR23","doi-asserted-by":"crossref","unstructured":"Hu J, Li S, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"2496_CR24","doi-asserted-by":"crossref","unstructured":"Wang Q, Wu B, Zhu P, Li P, Zuo W, Hu Q (2020) Eca-net: efficient channel attention for deep convolutional neural networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11534\u201311542","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"2496_CR25","doi-asserted-by":"publisher","first-page":"58923","DOI":"10.1109\/ACCESS.2020.2982994","volume":"8","author":"C Chaoxia","year":"2020","unstructured":"Chaoxia C, Shang W, Zhang F (2020) Information-guided flame detection based on faster r-cnn. IEEE Access 8:58923\u201358932","journal-title":"IEEE Access"},{"key":"2496_CR26","doi-asserted-by":"publisher","first-page":"8467","DOI":"10.1109\/TIP.2020.3016431","volume":"29","author":"S Li","year":"2020","unstructured":"Li S, Yan Q, Liu P (2020) An efficient fire detection method based on multiscale feature extraction, implicit deep supervision and channel attention mechanism. IEEE Trans Image Process 29:8467\u20138475","journal-title":"IEEE Trans Image Process"},{"key":"2496_CR27","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee Joon-Young, In SK (2018) Cbam: convolutional block attention module. 
In: Proceedings of the European conference on computer vision (ECCV), pp 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"2496_CR28","doi-asserted-by":"crossref","unstructured":"Li X, Wang W, Hu X, Yang J (2019) Selective kernel networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 510\u2013519","DOI":"10.1109\/CVPR.2019.00060"},{"key":"2496_CR29","doi-asserted-by":"crossref","unstructured":"Bello I, Zoph B, Vaswani A, Shlens J, Le QV (2019) Attention augmented convolutional networks. In: Proceedings of the IEEE international conference on computer vision, pp 3286\u20133295","DOI":"10.1109\/ICCV.2019.00338"},{"key":"2496_CR30","doi-asserted-by":"crossref","unstructured":"Gao Z, Xie J, Wang Q, Li P (2019) Global second-order pooling convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 3024\u20133033","DOI":"10.1109\/CVPR.2019.00314"},{"key":"2496_CR31","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.ins.2019.12.084","volume":"517","author":"P Gao","year":"2020","unstructured":"Gao P, Zhang Q, Wang F, Xiao L, Fujita H, Zhang Y (2020) Learning reinforced attentional representation for end-to-end visual tracking. Inf Sci 517:52\u201367","journal-title":"Inf Sci"},{"key":"2496_CR32","doi-asserted-by":"publisher","first-page":"105448","DOI":"10.1016\/j.knosys.2019.105448","volume":"193","author":"P Gao","year":"2020","unstructured":"Gao P, Yuan R, Wang F, Xiao L, Fujita H, Zhang Y (2020) Siamese attentional keypoint network for high performance visual tracking. Knowledge-Based Systems 193:105448","journal-title":"Knowledge-Based Systems"},{"key":"2496_CR33","doi-asserted-by":"crossref","unstructured":"Lin T-Y, Doll\u00e1r P, Girshick R, He K, Hariharan B, Belongie S (2017) Feature pyramid networks for object detection. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 2117\u20132125","DOI":"10.1109\/CVPR.2017.106"},{"key":"2496_CR34","doi-asserted-by":"publisher","first-page":"105590","DOI":"10.1016\/j.knosys.2020.105590","volume":"194","author":"F P\u00e9rez-Hern\u00e1ndez","year":"2020","unstructured":"P\u00e9rez-Hern\u00e1ndez F, Tabik S, Lamas A, Olmos R, Fujita H, Herrera F (2020) Object detection binary classifiers methodology based on deep learning to identify small objects handled similarly: application in video surveillance. Knowl-Based Syst 194: 105590","journal-title":"Knowl-Based Syst"},{"key":"2496_CR35","doi-asserted-by":"crossref","unstructured":"Chen L-C, Papandreou G, Kokkinos I, Murphy K, Yuille AL (2017) Deeplab: semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs, vol 40, pp 834\u2013848","DOI":"10.1109\/TPAMI.2017.2699184"},{"key":"2496_CR36","unstructured":"Chen L-C, Papandreou G, Schroff F, Adam H (2017) Rethinking atrous convolution for semantic image segmentation. arXiv:1706.05587"},{"key":"2496_CR37","unstructured":"GengYan L (2021) Fire detect dataset. https:\/\/github.com\/gengyanlei\/fire-detect-yolov4"},{"key":"2496_CR38","unstructured":"Chen K, Wang J, Pang J, Cao Y, Xiong Y, Li X, Sun S, Feng W, Liu Z, Xu J, et al. (2019) Mmdetection: open mmlab detection toolbox and benchmark. arXiv:1906.07155"},{"key":"2496_CR39","doi-asserted-by":"crossref","unstructured":"Deng J, Dong W, Socher R, Li L-J, Li K, Fei-Fei L (2009) Imagenet: a large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition, pp 248\u2013255. IEEE","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"2496_CR40","unstructured":"Goyal P, Doll\u00e1r P, Girshick R, Noordhuis P, Wesolowski L, Kyrola A, Tulloch A, Jia Y, He K (2017) Accurate, large minibatch sgd: training imagenet in 1 hour. 
arXiv:1706.02677"},{"key":"2496_CR41","doi-asserted-by":"crossref","unstructured":"Wang X, Girshick R, Gupta A, He K (2018) Non-local neural networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7794\u20137803","DOI":"10.1109\/CVPR.2018.00813"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-021-02496-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-021-02496-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-021-02496-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,24]],"date-time":"2022-01-24T01:09:27Z","timestamp":1642986567000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-021-02496-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,20]]},"references-count":41,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2022,1]]}},"alternative-id":["2496"],"URL":"https:\/\/doi.org\/10.1007\/s10489-021-02496-y","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,5,20]]},"assertion":[{"value":"1 May 2021","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 May 2021","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of Interests"}}]}}