<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                                                <journal-id>gummfd</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Gazi Üniversitesi Mühendislik Mimarlık Fakültesi Dergisi</journal-title>
            </journal-title-group>
                            <issn pub-type="ppub">1300-1884</issn>
                                        <issn pub-type="epub">1304-4915</issn>
                                                                                            <publisher>
                    <publisher-name>Gazi Üniversitesi</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.17341/gazimmfd.1708157</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Deep Learning</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Derin Öğrenme</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Karaciğer tümör segmentasyonu için veri füzyonunun ResUNet modeli üzerindeki etkisinin araştırılması</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="en">
                                    <trans-title>Investigating the impact of data fusion on the ResUNet model for liver tumor segmentation</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-7142-8448</contrib-id>
                                                                <name>
                                    <surname>Şeker Ertuğrul</surname>
                                    <given-names>Ümran</given-names>
                                </name>
                                                                    <aff>YOZGAT BOZOK ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-8602-4262</contrib-id>
                                                                <name>
                                    <surname>Kodaz</surname>
                                    <given-names>Halife</given-names>
                                </name>
                                                                    <aff>KONYA TEKNİK ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0003-4573-7025</contrib-id>
                                                                <name>
                                    <surname>İnan</surname>
                                    <given-names>Onur</given-names>
                                </name>
                                                                    <aff>SELÇUK ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="20260331">
                    <day>31</day>
                    <month>03</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>41</volume>
                                        <issue>1</issue>
                                        <fpage>533</fpage>
                                        <lpage>548</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="20250528">
                        <day>28</day>
                        <month>05</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="20260116">
                        <day>16</day>
                        <month>01</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 1986, Gazi Üniversitesi Mühendislik Mimarlık Fakültesi Dergisi</copyright-statement>
                    <copyright-year>1986</copyright-year>
                    <copyright-holder>Gazi Üniversitesi Mühendislik Mimarlık Fakültesi Dergisi</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Tıbbi görüntü segmentasyonu, hastalığın teşhisi veya hastalıklı bölgenin konumlandırılması amacıyla, görüntüdeki renk ve şekil farklılıklarını kullanarak bölgeleri ayırma işlemidir. Bu işlem manuel veya otomatik olarak gerçekleştirilebilir. Günümüzde makine öğrenmesi ve derin öğrenme tekniklerini kullanan otomatik segmentasyon yöntemlerinde, performansı artırmak amacıyla alana özgü modeller geliştirilmekte olup, tıbbi veri setlerinde U-Net tabanlı segmentasyon mimarileri sınırlı ve dengesiz veri ile bile etkili sonuçlar verebilmektedir. Ancak U-Net mimarisinde, derin ağlarda gradyan sönmesi gibi eğitim zorlukları ortaya çıkabilmektedir; bu noktada ResNet mimarisi, daha derin bir yapı sağlayarak derinlik gereksinimlerini karşılamaktadır. Bu mimarilerin birleştirilmesiyle oluşan hibrit ResUNet mimarisi U-Net’in segmentasyon gücünü ResNet’in artık bağlantıları ile birleştirerek, hem derin ağların avantajını kullanmakta hem de eğitim sürecindeki zorlukları hafifletmektedir. Bu çalışmada, otomatik karaciğer tümör segmentasyonu amacıyla, Temel Bileşenler Analizi (PCA) ve Ayrık Dalgacık Dönüşümü (DWT) ile kanal bazlı birleştirilen veriler üzerinde hibrit ResUNet modeli uygulanmıştır. Her kanalın özgün ve ayırt edici örüntülerini koruyarak özellik temsillerini zenginleştirmek amacıyla kanal bazlı veri füzyonu kullanılmıştır. PCA ve DWT tabanlı her iki füzyon yöntemi de, verileri farklı uzaylara taşıyarak modelin görüntüdeki farklı yapıları ayırt etme kapasitesini güçlendirmiştir. Sonuçlar, her iki yöntemin de iki farklı veri setinde birbirine yakın dice benzerlik katsayısı değerleri elde ederek karşılaştırılabilir performans sergilediğini ortaya koymaktadır.</p></abstract>
                                                                                                                                    <trans-abstract xml:lang="en">
                            <p>Medical image segmentation is employed to separate regions in images based on color and shape differences for disease diagnosis or localization of pathological areas. It can be performed manually or automatically. Automatic segmentation methods leverage machine learning and deep learning techniques, with domain-specific models developed to enhance performance; U-Net-based architectures can achieve effective results even with limited and imbalanced medical datasets. However, U-Net models may face training challenges such as vanishing gradients in deep networks, which can be addressed by ResNet architectures providing deeper structures. The hybrid ResUNet combines the segmentation capabilities of U-Net with the residual connections of ResNet, thus exploiting the advantages of deep networks while mitigating training difficulties. In this study, for automatic liver tumor segmentation, the hybrid ResUNet was applied to channel-based fused data obtained using Principal Component Analysis (PCA) and Discrete Wavelet Transform (DWT). Channel-based data fusion preserves the unique and distinctive patterns of each channel, enriching feature representations, and both PCA- and DWT-based fusion methods transform the data into different spaces, enhancing the model’s ability to differentiate various structures. The results demonstrate that both methods achieve comparable performance, yielding similar dice similarity coefficient values across two different datasets.</p></trans-abstract>
                                                            
            
                                                            <kwd-group>
                                                    <kwd>Tıbbi görüntü segmentasyonu</kwd>
                                                    <kwd>Ayrık Dalgacık Dönüşümü ile görüntü füzyonu</kwd>
                                                    <kwd>Karaciğer tümörü segmentasyonu</kwd>
                                                    <kwd>Temel Bileşenler Analizi ile görüntü füzyonu</kwd>
                                                    <kwd>ResUNet modeli</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="en">
                                                    <kwd>Medical image segmentation</kwd>
                                                    <kwd>Discrete Wavelet Transform image fusion</kwd>
                                                    <kwd>Liver tumor segmentation</kwd>
                                                    <kwd>Principal Component Analysis image fusion</kwd>
                                                    <kwd>ResUNet model</kwd>
                                            </kwd-group>
                                                                                                            </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">1.	Sayiner M., Golabi P., Younossi Z.M., Disease burden of hepatocellular carcinoma: a global perspective, Digestive Diseases and Sciences, 64, 910-917, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">2.	Li X., Huang Y., Tian J., H-DenseUNet: hybrid densely connected UNet for liver and tumor segmentation from CT volumes, IEEE Transactions on Medical Imaging, 37 (12), 2663-2674, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">3.	Sefti R., Sbibih D., Jennane R., An automatic B-snake model based on deep learning for medical image segmentation, Expert Systems with Applications, 2025.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">4.	Karakoyun M., Gülcü Ş., Kodaz H., D-MOSG: Discrete multi-objective shuffled gray wolf optimizer for multi-level image thresholding, Engineering Science and Technology, an International Journal, 24 (6), 1455-1466, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">5.	Mittal M., Garg A., Sofat S., Goyal L.M., Deep learning based enhanced tumor segmentation approach for MR brain images, Applied Soft Computing, 78, 346-354, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">6.	Fan T., Wang G., Li Y., Wang H., Ma-net: A multi-scale attention network for liver and tumor segmentation, IEEE Access, 8, 179656-179665, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">7.	Pereira S., Pinto A., Alves V., Silva C.A., Brain tumor segmentation using convolutional neural networks in MRI images, IEEE Transactions on Medical Imaging, 35 (5), 1240-1251, 2016.</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">8.	Liu Y., Stojadinovic S., Hrycushko B., Wardak Z., Lau S., Lu W., Gu X., A deep convolutional neural network-based automatic delineation strategy for multiple brain metastases stereotactic radiosurgery, PloS One, 12 (10), e0185844, 2017.</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">9.	Shin H.C., Tenenholtz N.A., Rogers J.K., Schwarz C.G., Senjem M.L., Gunter J.L., Michalski M., Medical image synthesis for data augmentation and anonymization using generative adversarial networks, Simulation and Synthesis in Medical Imaging: Third International Workshop, SASHIMI 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 16, 2018, Proceedings 3, 1-11, Springer International Publishing, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">10.	Mok T.C., Chung A.C., Learning data augmentation for brain tumor segmentation with coarse-to-fine generative adversarial networks, BrainLesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries: 4th International Workshop, BrainLes 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 16, 2018, Revised Selected Papers, Part I 4, Springer, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">11.	Dvornik N., Mairal J., Schmid C., On the importance of visual context for data augmentation in scene understanding, IEEE Transactions on Pattern Analysis and Machine Intelligence, 43 (6), 2014-2028, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">12.	Agarwal M., Mahajan R., Medical images contrast enhancement using quad weighted histogram equalization with adaptive gamma correction and homomorphic filtering, Procedia Computer Science, 115, 509-517, 2017.</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">13.	Goodfellow I.J., Pouget-Abadie J., Mirza M., Xu B., Warde-Farley D., Ozair S., Courville A., Bengio Y., Generative adversarial nets, Advances in Neural Information Processing Systems, 27, 2014.</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">14.	Nalepa J., Marcinkiewicz M., Kawulok M., Data augmentation for brain-tumor segmentation: a review, Frontiers in Computational Neuroscience, 13, 83, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">15.	Mouraviev A., Generative adversarial network for MRI super resolution.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">16.	Bi L., Kim J., Kumar A., Feng D., Automatic liver lesion detection using cascaded deep residual networks, arXiv preprint arXiv:1704.02703, 2017.</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">17.	Li, Y., Daho, M. E. H., Conze, P. H., Zeghlache, R., Le Boité, H., Tadayoni, R., Quellec, G., A review of deep learning-based information fusion techniques for multimodal medical image classification, Computers in Biology and Medicine, 177, 108635, 2024.</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">18.	Aakaaram V., Bachu S., MRI and CT image fusion using synchronized anisotropic diffusion equation with DT-CWT decomposition, 2022 Smart Technologies, Communication and Robotics (STCR), 1-5, 2022.</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">19.	Affane A., Kucharski A., Chapuis P., Freydier S., Lebre M.A., Vacavant A., Fabijańska A., Segmentation of liver anatomy by combining 3D U-net approaches, Applied Sciences, 11 (11), 4895, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">20.	Huang Q., Sun J., Ding H., Wang X., Wang G., Robust liver vessel extraction using 3D U-Net with variant dice loss function, Computers in Biology and Medicine, 101, 153-162, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">21.	Shen Y., Sheng V.S., Wang L., Duan J., Xi X., Zhang D., Cui Z., Empirical comparisons of deep learning networks on liver segmentation, Computers, Materials &amp; Continua, 62 (3), 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">22.	Bilic P., Christ P., Li H.B., Vorontsov E., Ben-Cohen A., Kaissis G., Menze B., The liver tumor segmentation benchmark (LiTS), Medical Image Analysis, 84, 102680, 2023.</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">23.	Moghbel M., Mashohor S., Mahmud R., Saripan M.I.B., Review of liver segmentation and computer assisted detection/diagnosis methods in computed tomography, Artificial Intelligence Review, 50, 497-537, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">24.	Çavga S.H., Performance of neural networks and heuristic models for disease prediction from liver enzymes: Application to biochemistry device output, Journal of the Faculty of Engineering and Architecture of Gazi University, 39 (4), 2263–2270, 2024.</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">25.	Grover P., 3D Liver segmentation, Kaggle Dataset, Available from: https://www.kaggle.com/datasets/prathamgrover/3d-liver-segmentation/data, Accessed on 09.01.2025.</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">26.	Tahir J., LiTS17, Kaggle Dataset, Available from: https://www.kaggle.com/datasets/javariatahir/litstrain-val, Accessed on 09.01.2025.</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">27.	Aymaz S., A new hybrid approach for multi-focus image fusion using CNN and SVM methods, Journal of the Faculty of Engineering and Architecture of Gazi University, 39 (2), 1123–1136, 2024.</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">28.	Salau A.O., Jain S., Eneh J.N., A review of various image fusion types and transform, Indonesian Journal of Electrical Engineering and Computer Science, 24 (3), 1515-1522, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">29.	Zou Y., Chen S., Che C., Zhang J., Zhang Q., Breast cancer histopathology image classification based on dual-stream high-order network, Biomedical Signal Processing and Control, 78, 104007, 2022.</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">30.	Ünel F.B., Yalpir S., Reduction of mass appraisal criteria with principal component analysis and integration to GIS, International Journal of Engineering and Geosciences, 4 (3), 94-105, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">31.	Abdulkareem M.B., Design and development of multimodal medical image fusion using discrete wavelet transform, 2nd International Conference on Inventive Communication and Computational Technologies (ICICCT), IEEE, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">32.	Jaeger P.F., Kohl S.A., Bickelhaupt S., Isensee F., Kuder T.A., Schlemmer H.P., Maier-Hein K.H., Retina U-Net: Embarrassingly simple exploitation of segmentation supervision for medical object detection, Machine Learning for Health Workshop, PMLR, 171-183, Nisan 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">33.	Ghosh S., Chaki A., Santosh K., Improved U-Net architecture with VGG-16 for brain tumor segmentation, Physical and Engineering Sciences in Medicine, 44 (3), 703-712, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">34.	Jin Q., Meng Z., Pham T.D., Chen Q., Wei L., Su R., DUNet: A deformable network for retinal vessel segmentation, Knowledge-Based Systems, 178, 149-162, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">35.	Jin Q., Meng Z., Sun C., Cui H., Su R., RA-UNet: A hybrid deep attention-aware network to extract liver and tumor in CT scans, Frontiers in Bioengineering and Biotechnology, 8, 605132, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref36">
                        <label>36</label>
                        <mixed-citation publication-type="journal">36.	Weng Y., Zhou T., Li Y., Qiu X., Nas-unet: Neural architecture search for medical image segmentation, IEEE Access, 7, 44247-44257, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref37">
                        <label>37</label>
                        <mixed-citation publication-type="journal">37.	Azad R., Asadi-Aghbolaghi M., Fathy M., Escalera S., Bi-directional ConvLSTM U-Net with densely connected convolutions, IEEE/CVF International Conference on Computer Vision Workshops, 0-0, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref38">
                        <label>38</label>
                        <mixed-citation publication-type="journal">38.	Ding Y., Chen F., Zhao Y., Wu Z., Zhang C., Wu D., A stacked multi-connection simple reducing net for brain tumor segmentation, IEEE Access, 7, 104011-104024, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref39">
                        <label>39</label>
                        <mixed-citation publication-type="journal">39.	Siddique N., Paheding S., Elkin C.P., Devabhaktuni V., U-net and its variants for medical image segmentation: A review of theory and applications, IEEE Access, 9, 82031-82057, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref40">
                        <label>40</label>
                        <mixed-citation publication-type="journal">40.	Wightman R., Touvron H., Jégou H., Resnet strikes back: An improved training procedure in timm, arXiv preprint arXiv:2110.00476, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref41">
                        <label>41</label>
                        <mixed-citation publication-type="journal">41.	Zhang C., Benz P., Argaw D.M., Lee S., Kim J., Rameau F., Kweon I.S., Resnet or densenet? Introducing dense shortcuts to resnet, Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, 3550-3559, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref42">
                        <label>42</label>
                        <mixed-citation publication-type="journal">42.	Roy S.K., Manna S., Song T., Bruzzone L., Attention-based adaptive spectral–spatial kernel ResNet for hyperspectral image classification, IEEE Transactions on Geoscience and Remote Sensing, 59 (9), 7831-7843, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref43">
                        <label>43</label>
                        <mixed-citation publication-type="journal">43.	Yu H., Sun H., Tao J., Qin C., Xiao D., Jin Y., Liu C., A multi-stage data augmentation and AD-ResNet-based method for EPB utilization factor prediction, Automation in Construction, 147, 104734, 2023.</mixed-citation>
                    </ref>
                                    <ref id="ref44">
                        <label>44</label>
                        <mixed-citation publication-type="journal">44.	Keles A., Keles M.B., Keles A., COV19-CNNet and COV19-ResNet: diagnostic inference engines for early detection of COVID-19, Cognitive Computation, 2021, 1-11.</mixed-citation>
                    </ref>
                                    <ref id="ref45">
                        <label>45</label>
                        <mixed-citation publication-type="journal">45.	McNeely-White D., Beveridge J.R., Draper B.A., Inception and ResNet features are (almost) equivalent, Cognitive Systems Research, 59, 312-318, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref46">
                        <label>46</label>
                        <mixed-citation publication-type="journal">46.	Zhang K., Tang B., Deng L., Liu X., A hybrid attention improved ResNet based fault diagnosis method of wind turbines gearbox, Measurement, 179, 109491, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref47">
                        <label>47</label>
                        <mixed-citation publication-type="journal">47.	Nguyen G.N., Le Viet N.H., Elhoseny M., Shankar K., Gupta B.B., Abd El-Latif A.A., Secure blockchain enabled Cyber–physical systems in healthcare using deep belief network with ResNet model, Journal of Parallel and Distributed Computing, 153, 150-160, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref48">
                        <label>48</label>
                        <mixed-citation publication-type="journal">48.	Lu Y., Qin X., Fan H., Lai T., Li Z., WBC-Net: A white blood cell segmentation network based on UNet++ and ResNet, Applied Soft Computing, 101, 107006, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref49">
                        <label>49</label>
                        <mixed-citation publication-type="journal">49.	Zhang K., Tang B., Deng L., Tan Q., Yu H., A fault diagnosis method for wind turbines gearbox based on adaptive loss weighted meta-ResNet under noisy labels, Mechanical Systems and Signal Processing, 161, 107963, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref50">
                        <label>50</label>
                        <mixed-citation publication-type="journal">50.	Mandal B., Okeukwu A., Theis Y., Masked face recognition using ResNet-50, arXiv preprint arXiv:2104.08997, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref51">
                        <label>51</label>
                        <mixed-citation publication-type="journal">51.	Gao M., Qi D., Mu H., Chen J., A transfer residual neural network based on ResNet-34 for detection of wood knot defects, Forests, 12(2), 212, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref52">
                        <label>52</label>
                        <mixed-citation publication-type="journal">52.	Ma L., Shuai R., Ran X., Liu W., Ye C., Combining DC-GAN with ResNet for blood cell image classification, Medical &amp; Biological Engineering &amp; Computing, 58, 1251-1264, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref53">
                        <label>53</label>
                        <mixed-citation publication-type="journal">53.	Chen X., Yao L., Zhang Y., Residual attention U-Net for automated multi-class segmentation of COVID-19 chest CT images, arXiv preprint arXiv:2004.05645, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref54">
                        <label>54</label>
                        <mixed-citation publication-type="journal">54.	Lu Z., Bai Y., Chen Y., Su C., Lu S., Zhan T., Wang S., The classification of gliomas based on a pyramid dilated convolution ResNet model, Pattern Recognition Letters, 133, 173-179, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref55">
                        <label>55</label>
                        <mixed-citation publication-type="journal">55.	Vuola A.O., Akram S.U., Kannala J., Mask-RCNN and U-net ensembled for nuclei segmentation, 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), IEEE, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref56">
                        <label>56</label>
                        <mixed-citation publication-type="journal">56.	Kerfoot E., Clough J., Oksuz I., Lee J., King A.P., Schnabel J.A., Left-ventricle quantification using residual U-Net, Statistical Atlases and Computational Models of the Heart. Atrial Segmentation and LV Quantification Challenges: 9th International Workshop, STACOM 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 16, 2018, Revised Selected Papers 9, Springer International Publishing, 371-380, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref57">
                        <label>57</label>
                        <mixed-citation publication-type="journal">57.	Li D., Dharmawan D.A., Ng B.P., Rahardja S., Residual U-Net for retinal vessel segmentation, 2019 IEEE International Conference on Image Processing (ICIP), 1425-1429, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref58">
                        <label>58</label>
                        <mixed-citation publication-type="journal">58.	Zhang Z., Liu Q., Wang Y., Road extraction by deep residual U-Net, IEEE Geoscience and Remote Sensing Letters, 15 (5), 749-753, 2018.</mixed-citation>
                    </ref>
                                    <ref id="ref59">
                        <label>59</label>
                        <mixed-citation publication-type="journal">59.	Sharma S., Sharma S., Athaiya A., Activation functions in neural networks, Towards Data Sci, 6 (12), 310-316, 2017.</mixed-citation>
                    </ref>
                                    <ref id="ref60">
                        <label>60</label>
                        <mixed-citation publication-type="journal">60.	Gustineli M., A survey on recently proposed activation functions for deep learning, arXiv preprint arXiv:2204.02921, 2022.</mixed-citation>
                    </ref>
                                    <ref id="ref61">
                        <label>61</label>
                        <mixed-citation publication-type="journal">61.	Makris A., Kontopoulos I., Tserpes K., COVID-19 detection from chest X-Ray images using deep learning and convolutional neural networks, 11th Hellenic Conference on Artificial Intelligence, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref62">
                        <label>62</label>
                        <mixed-citation publication-type="journal">62.	Sankaran K.S., Thangapandian M., Vasudevan N., Brain tumor grade identification using deep Elman neural network with adaptive fuzzy clustering-based segmentation approach, Multimedia Tools and Applications, 80(16), 25139-25169, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref63">
                        <label>63</label>
                        <mixed-citation publication-type="journal">63.	Rahman Z., Hussain A., Shah H., Arshad M., Urdu news clustering using K-Mean algorithm on the basis of Jaccard coefficient and Dice coefficient similarity, 2022.</mixed-citation>
                    </ref>
                                    <ref id="ref64">
                        <label>64</label>
                        <mixed-citation publication-type="journal">64.	Wang L., Wang C., Sun Z., Chen S., An improved dice loss for pneumothorax segmentation by mining the information of negative areas, IEEE Access, 8, 167939-167949, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref65">
                        <label>65</label>
                        <mixed-citation publication-type="journal">65.	Lei T., Wang R., Zhang Y., Wan Y., Liu C., Nandi A.K., DefED-Net: Deformable encoder-decoder network for liver and liver tumor segmentation, IEEE Transactions on Radiation and Plasma Medical Sciences, 6 (1), 68-78, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref66">
                        <label>66</label>
                        <mixed-citation publication-type="journal">66.	Zhang C., Hua Q., Chu Y., Wang P., Liver tumor segmentation using 2.5D UV-Net with multi-scale convolution, Computers in Biology and Medicine, 133, 104424, 2021.</mixed-citation>
                    </ref>
                                    <ref id="ref67">
                        <label>67</label>
                        <mixed-citation publication-type="journal">67.	Chen X., Zhang R., Yan P., Feature fusion encoder decoder network for automatic liver lesion segmentation, 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), IEEE, 2019.</mixed-citation>
                    </ref>
                                    <ref id="ref68">
                        <label>68</label>
                        <mixed-citation publication-type="journal">68.	Han X., Automatic liver lesion segmentation using a deep convolutional neural network method, arXiv preprint arXiv:1704.07239, 2017.</mixed-citation>
                    </ref>
                                    <ref id="ref69">
                        <label>69</label>
                        <mixed-citation publication-type="journal">69.	Alirr O.I., Deep learning and level set approach for liver and tumor segmentation from CT scans, Journal of Applied Clinical Medical Physics, 21 (10), 200-209, 2020.</mixed-citation>
                    </ref>
                                    <ref id="ref70">
                        <label>70</label>
                        <mixed-citation publication-type="journal">70.	Tummala B.M., Barpanda S.S., Liver tumor segmentation from computed tomography images using multiscale residual dilated encoder-decoder network, International Journal of Imaging Systems and Technology, 32 (2), 600-613, 2022.</mixed-citation>
                    </ref>
                                    <ref id="ref71">
                        <label>71</label>
                        <mixed-citation publication-type="journal">71.	Sks-zod, Resized Liver Tumor, Kaggle Dataset, Available from: https://www.kaggle.com/datasets/skszod/resized-liver-tumor, Accessed on 10.12.2024.</mixed-citation>
                    </ref>
                                    <ref id="ref72">
                        <label>72</label>
                        <mixed-citation publication-type="journal">72.	Şeker Ertuğrul Ü., Kodaz H., Discrete Wavelet Transform-Based Data Fusion with ResUNet Model for Liver Tumor Segmentation, Electronics, 14 (13), 2589, 2025.</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
