<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                    <journal-id></journal-id>
            <journal-title-group>
                                                                                    <journal-title>Balkan Journal of Electrical and Computer Engineering</journal-title>
            </journal-title-group>
                            <issn pub-type="ppub">2147-284X</issn>
                                        <issn pub-type="epub">2147-284X</issn>
                                                                                            <publisher>
                    <publisher-name>MUSA YILMAZ</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.17694/bajece.1817907</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Computer Software</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Bilgisayar Yazılımı</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Bias Mitigation in Ensemble-Based Meat Freshness Classification Using Grad-CAM</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="tr">
                                    <trans-title>Grad-CAM Kullanılarak Topluluk Tabanlı Et Tazeliği Sınıflandırmasında Önyargının Azaltılması</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">
                                        https://orcid.org/0000-0002-4871-709X</contrib-id>
                                                                <name>
                                    <surname>Külcü</surname>
                                    <given-names>Sercan</given-names>
                                </name>
                                                                    <aff>GİRESUN ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">
                                        https://orcid.org/0000-0001-7108-2654</contrib-id>
                                                                <name>
                                    <surname>Balpetek Külcü</surname>
                                    <given-names>Duygu</given-names>
                                </name>
                                                                    <aff>GIRESUN UNIVERSITY, FACULTY OF ENGINEERING</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-03-28">
                    <day>28</day>
                    <month>03</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>14</volume>
                                                    <fpage>74</fpage>
                                        <lpage>82</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2025-11-05">
                        <day>05</day>
                        <month>11</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2026-03-10">
                        <day>10</day>
                        <month>03</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2013, Balkan Journal of Electrical and Computer Engineering</copyright-statement>
                    <copyright-year>2013</copyright-year>
                    <copyright-holder>Balkan Journal of Electrical and Computer Engineering</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Visual biases in deep learning models, such as focusing on packaging trays instead of meat texture, reduce the reliability of computer vision systems in food safety applications. This study proposes a Grad-CAM-guided bias mitigation framework for multiclass meat freshness classification that combines explainable AI with a lightweight hybrid ensemble design. A MiniCAM attention module is integrated into MobileNetV2 to redirect model focus toward meat-specific visual cues, and its features are fused with complementary embeddings extracted from Xception. The final decision is obtained by combining the predictions of MobileNetV2 with classical classifiers (SVM and XGBoost) using test-time augmentation and grid-optimized weighted ensembling. The proposed framework achieves 99.78% accuracy on the held-out test set and 99.66% ± 0.23 average accuracy under 5-fold cross-validation, while maintaining real-time efficiency (4.3M parameters, 16.5 MB model size, and 825.1 FPS on a single GPU), and effectively suppresses non-informative background elements (e.g., packaging trays) as confirmed by Grad-CAM visualizations. These results demonstrate that integrating explainable bias mitigation with lightweight ensemble learning enables reliable and deployable meat freshness assessment for real-world food safety inspection.</p></abstract>
                                                                                                                                    <trans-abstract xml:lang="tr">
                            <p>Derin öğrenme modellerindeki görsel önyargılar, örneğin et dokusuna odaklanmak yerine ambalaj tepsilerine odaklanmak, gıda güvenliği uygulamalarında güvenilirliği azaltır. Bu çalışmada, hibrit topluluk öğrenmesiyle et tazeliği sınıflandırması için Grad-CAM rehberli bir önyargı azaltma çerçevesi önerilmektedir. Aktivasyon haritası analizi, temel modellerin sistematik olarak bilgilendirici olmayan arka plan unsurlarına dikkat ettiğini ortaya koymuştur. Bunu düzeltmek için MobileNetV2’ye bir mini kanal dikkat modülü (MiniCAM) entegre ettik, özelliklerini Xception gömüleriyle birleştirdik ve test-zamanı artırımı (TTA) ile ızgara-optimizasyonlu ağırlıklı topluluk kullanarak SVM ve XGBoost tahminlerini birleştirdik. Bu yaklaşım, modelin odağını ilgili ipuçlarına (renk, doku ve nem) başarıyla yönlendirmiştir. Bu ipuçları, taze, yarı taze ve bozulmuş sınıfları ayırt etmede kritik öneme sahiptir. Gerçek dünya verilerinden oluşan çok sınıflı bir veri seti üzerinde değerlendirilen önerilen topluluk, daha yüksek doğruluk ve dayanıklılık göstermiştir. Bu çalışma, görsel akıl yürütmeyi alan uzmanlığıyla birleştirir ve Açıklanabilir Yapay Zeka (XAI)’nın yüksek riskli gıda kalite denetiminde kullanılmasını önerir. Böylece, kaynak kısıtlı tedarik zincirlerinde güvenilir dağıtım sağlanabilir.</p></trans-abstract>
                                                            
            
                                                            <kwd-group>
                                                    <kwd>Bias Mitigation</kwd>
                                                    <kwd>Computer Vision</kwd>
                                                    <kwd>Ensemble Learning</kwd>
                                                    <kwd>Food Safety</kwd>
                                                    <kwd>Grad-CAM</kwd>
                                                    <kwd>Meat Freshness</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="tr">
                                                    <kwd>Önyargı Azaltma</kwd>
                                                    <kwd>Bilgisayarla Görme</kwd>
                                                    <kwd>Topluluk Öğrenmesi</kwd>
                                                    <kwd>Gıda Güvenliği</kwd>
                                                    <kwd>Grad-CAM</kwd>
                                                    <kwd>Et Tazeliği</kwd>
                                            </kwd-group>
                                                                                                        <funding-group specific-use="FundRef">
                    <award-group>
                                                    <funding-source>
                                <named-content content-type="funder_name">The authors declare that no funding was used in the study.</named-content>
                            </funding-source>
                                                                    </award-group>
                </funding-group>
                                </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">[1]	Karanth, S., Feng, S., Patra, D., &amp; Pradhan, A. K. (2023). Linking microbial contamination to food spoilage and food waste: The role of smart packaging, spoilage risk assessments, and date labeling. Frontiers in Microbiology, 14, 1198124. https://doi.org/10.3389/fmicb.2023.1198124</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">[2]	Shi, Y., Wang, X., Borhan, M. S., Young, J., Newman, D., Berg, E., &amp; Sun, X. (2021). A review on meat quality evaluation methods based on non-destructive computer vision and artificial intelligence technologies. Food Science of Animal Resources, 41(4), 563. https://doi.org/10.5851/kosfa.2021.e25</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">[3]	Shanawad, V. (2025, November 3). Meat freshness image dataset. Kaggle. https://www.kaggle.com/datasets/vinayakshanawad/meat-freshness-image-dataset</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">[4]	Büyükarıkan, B. (2024). ConvColor DL: Concatenated convolutional and handcrafted color features fusion for beef quality identification. Food Chemistry, 460, 140795. https://doi.org/10.1016/j.foodchem.2024.140795</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">[5]	Abd Elfattah, M., Ewees, A. A., Darwish, A., &amp; Hassanien, A. E. (2025). Detection and classification of meat freshness using an optimized deep learning method. Food Chemistry, 489, 144783. https://doi.org/10.1016/j.foodchem.2025.144783</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">[6]	Hidalgo, M. M., Lima, R. C., De Nadai Fernandes, E. A., Bacchi, M. A., &amp; Sarriés, G. A. (2025). Leveraging pre-trained computer vision models for accurate classification of meat freshness. Food Chemistry, 495, 146430. https://doi.org/10.1016/j.foodchem.2025.146430</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">[7]	Elangovan, P., Dhurairajan, V., Nath, M. K., Yogarajah, P., &amp; Condell, J. (2024). A novel approach for meat quality assessment using an ensemble of compact convolutional neural networks. Applied Sciences, 14(14), 5979. https://doi.org/10.3390/app14145979</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">[8]	Zhou, C., Pi, J., Chen, X., Wang, D., &amp; Liu, J. (2025). Identification and analysis of pork freshness quality based on improved MobileNetV3. Applied Engineering in Agriculture, 41(1), 57–66. https://doi.org/10.13031/aea.16131</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">[9]	Shyamala Devi, M., Arun Pandian, J., Umanandhini, D., Sakineti, A., &amp; Jeyaraj, R. (2024, January). Meat freshness state prediction using a novel fifteen layered deep convolutional neural network. In Proceedings of the International Conference on Data Science and Network Engineering (ICDSNE 2023) (pp. 103–116). Springer. https://doi.org/10.1007/978-981-99-6755-1_9</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">[10]	Tanim, S. A., Shrestha, T. E., Tanvir, K., Kabir, M. S., Mridha, M. F. &amp; Haq, M. K. (2024, September). Single-level fusion for enhancing meat quality classification with explainable AI. In Proceedings of the IEEE International Conference on Computing, Applications and Systems (COMPAS) (pp. 1–6). IEEE. https://doi.org/10.1109/COMPAS60761.2024.10796775</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">[11]	Ren, X., Wang, Y., Huang, Y., Mustafa, M., Sun, D., Xue, F., Chen, D., Xu, L., &amp; Wu, F. (2023). A CNN-based E-Nose using time series features for food freshness classification. IEEE Sensors Journal, 23(6), 6027–6038. https://doi.org/10.1109/JSEN.2023.3241842</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">[12]	Susanti, E., Ariyana, R. Y., Cahyo, E. N., Sutanta, E., &amp; Kumalasanti, R. A. (2023, October). Beef image classification using the Inception V3 transfer learning model. In Proceedings of the IEEE 9th Information Technology International Seminar (ITIS) (pp. 1–6). IEEE. https://doi.org/10.1109/ITIS59651.2023.10420013</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">[13]	Hidalgo, M. M., Lima, R. C., De Nadai Fernandes, E. A., Bacchi, M. A., &amp; Sarriés, G. A. (2025). Leveraging pre-trained computer vision models for accurate classification of meat freshness. Food Chemistry, 495, 146430. https://doi.org/10.1016/j.foodchem.2025.146430</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">[14]	Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., &amp; Chen, L. (2018, June). MobileNetV2: Inverted residuals and linear bottlenecks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 4510–4520). IEEE. https://doi.org/10.1109/CVPR.2018.00474</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">[15]	He, K., Zhang, X., Ren, S., &amp; Sun, J. (2016, June). Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 770–778). IEEE. https://doi.org/10.1109/CVPR.2016.90</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">[16]	Jocher, G., &amp; Qiu, J. (2024). Ultralytics YOLO11 (Version 11.0.0). GitHub. https://github.com/ultralytics/ultralytics</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">[17]	Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., &amp; Fei-Fei, L. (2009, June). ImageNet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 248–255). IEEE. https://doi.org/10.1109/CVPR.2009.5206848</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">[18]	Selvaraju, R. R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., &amp; Batra, D. (2017, October). Grad-CAM: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE International Conference on Computer Vision (ICCV) (pp. 618–626). IEEE. https://doi.org/10.1109/ICCV.2017.74</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">[19]	Chollet, F. (2017, July). Xception: Deep learning with depthwise separable convolutions. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 1251–1258). IEEE. https://doi.org/10.1109/CVPR.2017.195</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
