{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:27:13Z","timestamp":1775579233208,"version":"3.50.1"},"publisher-location":"Cham","reference-count":24,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031439926","type":"print"},{"value":"9783031439933","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-43993-3_56","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:07:48Z","timestamp":1696115268000},"page":"580-591","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["PAS-Net: Rapid Prediction of Antibiotic Susceptibility from Fluorescence Images of Bacterial Cells Using Parallel Dual-Branch Network"],"prefix":"10.1007","author":[{"given":"Wei","family":"Xiong","sequence":"first","affiliation":[]},{"given":"Kaiwei","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Baiying","family":"Lei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"56_CR1","doi-asserted-by":"publisher","first-page":"176","DOI":"10.1016\/S0140-6736(15)00473-0","volume":"387","author":"AH 
Holmes","year":"2016","unstructured":"Holmes, A.H., et al.: Understanding the mechanisms and drivers of antimicrobial resistance. Lancet 387, 176\u2013187 (2016)","journal-title":"Lancet"},{"key":"56_CR2","doi-asserted-by":"publisher","first-page":"3903","DOI":"10.2147\/IDR.S234610","volume":"12","author":"P Dadgostar","year":"2019","unstructured":"Dadgostar, P.: Antimicrobial resistance: implications and costs. Infect. Drug Resist. 12, 3903\u20133910 (2019)","journal-title":"Infect. Drug Resist."},{"key":"56_CR3","doi-asserted-by":"publisher","first-page":"2857","DOI":"10.1080\/10408398.2015.1077192","volume":"57","author":"M Ferri","year":"2017","unstructured":"Ferri, M., Ranucci, E., Romagnoli, P., Giaccone, V.: Antimicrobial resistance: a global emerging threat to public health systems. Crit. Rev. Food Sci. Nutr. 57, 2857\u20132876 (2017)","journal-title":"Crit. Rev. Food Sci. Nutr."},{"key":"56_CR4","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"56_CR5","doi-asserted-by":"crossref","unstructured":"Xie, S., Girshick, R., Doll\u00e1r, P., Tu, Z., He, K.: Aggregated residual transformations for deep neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1492\u20131500 (2017)","DOI":"10.1109\/CVPR.2017.634"},{"key":"56_CR6","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: Resnest: split-attention networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
2736\u20132746 (2022)","DOI":"10.1109\/CVPRW56347.2022.00309"},{"key":"56_CR7","doi-asserted-by":"publisher","first-page":"845","DOI":"10.1016\/j.stemcr.2019.02.004","volume":"12","author":"A Waisman","year":"2019","unstructured":"Waisman, A., et al.: Deep learning neural networks highly predict very early onset of pluripotent stem cell differentiation. Stem Cell Rep. 12, 845\u2013859 (2019)","journal-title":"Stem Cell Rep."},{"key":"56_CR8","doi-asserted-by":"crossref","unstructured":"Riasatian, A., et al.: Fine-Tuning and training of densenet for histopathology image representation using TCGA diagnostic slides. Med. Image Anal. 70, 102032 (2021)","DOI":"10.1016\/j.media.2021.102032"},{"key":"56_CR9","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"56_CR10","unstructured":"Dosovitskiy, A., et al.: An image is worth 16 \u00d7 16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"56_CR11","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"56_CR12","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"56_CR13","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"56_CR14","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. neural inf. Process. Syst. 30 (2017)"},{"key":"56_CR15","doi-asserted-by":"publisher","first-page":"102357","DOI":"10.1016\/j.media.2022.102357","volume":"77","author":"X He","year":"2022","unstructured":"He, X., Tan, E.-L., Bi, H., Zhang, X., Zhao, S., Lei, B.: Fully transformer network for skin lesion analysis. Med. Image Anal. 77, 102357 (2022)","journal-title":"Med. Image Anal."},{"key":"56_CR16","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976\u201311986 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"56_CR17","doi-asserted-by":"crossref","unstructured":"Peng, Z., et al.: Conformer: local features coupling global representations for visual recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 367\u2013376 (2021)","DOI":"10.1109\/ICCV48922.2021.00042"},{"key":"56_CR18","doi-asserted-by":"crossref","unstructured":"Yuan, K., Guo, S., Liu, Z., Zhou, A., Yu, F., Wu, W.: Incorporating convolution designs into visual transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 579\u2013588 (2021)","DOI":"10.1109\/ICCV48922.2021.00062"},{"key":"56_CR19","doi-asserted-by":"publisher","first-page":"416","DOI":"10.1109\/JBHI.2016.2526603","volume":"21","author":"Z Gao","year":"2016","unstructured":"Gao, Z., Wang, L., Zhou, L., Zhang, J.: HEp-2 cell image classification with deep convolutional neural networks. IEEE j. Biomed. Health Inform. 21, 416\u2013428 (2016)","journal-title":"IEEE j. Biomed. 
Health Inform."},{"key":"56_CR20","doi-asserted-by":"crossref","unstructured":"Phan, H.T.H., Kumar, A., Kim, J., Feng, D.: Transfer learning of a convolutional neural network for HEp-2 cell image classification. In: 2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI), pp. 1208\u20131211. IEEE (2016)","DOI":"10.1109\/ISBI.2016.7493483"},{"key":"56_CR21","doi-asserted-by":"crossref","unstructured":"Jia, X., Shen, L., Zhou, X., Yu, S.: Deep convolutional neural network based HEp-2 cell classification. In: 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 77\u201380. IEEE (2016)","DOI":"10.1109\/ICPR.2016.7899611"},{"key":"56_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"12","DOI":"10.1007\/978-3-319-67558-9_2","volume-title":"Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support","author":"Y Li","year":"2017","unstructured":"Li, Y., Shen, L.: A deep residual inception network for HEp-2 cell classification. In: Cardoso, M.J., Arbel, T., Carneiro, G., Syeda-Mahmood, T., Tavares, J.M.R.S., Moradi, M., Bradley, A., Greenspan, H., Papa, J.P., Madabhushi, A., Nascimento, J.C., Cardoso, J.S., Belagiannis, V., Lu, Z. (eds.) DLMIA\/ML-CDS -2017. LNCS, vol. 10553, pp. 12\u201320. Springer, Cham (2017). https:\/\/doi.org\/10.1007\/978-3-319-67558-9_2"},{"key":"56_CR23","doi-asserted-by":"crossref","unstructured":"Liu, J., Xu, B., Shen, L., Garibaldi, J., Qiu, G.: HEp-2 cell classification based on a deep autoencoding-classification convolutional neural network. In: 2017 IEEE 14th International Symposium on Biomedical Imaging (ISBI 2017), pp. 1019\u20131023. 
IEEE (2017)","DOI":"10.1109\/ISBI.2017.7950689"},{"key":"56_CR24","doi-asserted-by":"publisher","first-page":"290","DOI":"10.1016\/j.patcog.2018.02.006","volume":"79","author":"H Lei","year":"2018","unstructured":"Lei, H., et al.: A deeply supervised residual network for HEp-2 cell classification via cross-modal transfer learning. Pattern Recogn. 79, 290\u2013302 (2018)","journal-title":"Pattern Recogn."}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-43993-3_56","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,2]],"date-time":"2024-04-02T16:12:22Z","timestamp":1712074342000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-43993-3_56"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031439926","9783031439933"],"references-count":24,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-43993-3_56","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference 
City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2023\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2250","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"730","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}