<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                    <journal-id></journal-id>
            <journal-title-group>
                                                                                    <journal-title>Çukurova Üniversitesi Sosyal Bilimler Enstitüsü Dergisi</journal-title>
            </journal-title-group>
                                        <issn pub-type="epub">1304-8899</issn>
                                                                                            <publisher>
                    <publisher-name>Çukurova Üniversitesi</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.35379/cusosbil.1685334</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>History of Philosophy (Other)</subject>
                                                            <subject>Sociology (Other)</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Felsefe Tarihi (Diğer)</subject>
                                                            <subject>Sosyoloji (Diğer)</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                                                                                                <trans-title-group xml:lang="tr">
                                    <trans-title>YAPAY ZEKA VE ETİK: KÜRESEL BİR BAKIŞ</trans-title>
                                </trans-title-group>
                                                                                                                                                                                                <article-title>ARTIFICIAL INTELLIGENCE AND ETHICS: A GLOBAL PERSPECTIVE</article-title>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-6629-6543</contrib-id>
                                                                <name>
                                    <surname>Güzelergene</surname>
                                    <given-names>Elif Simge</given-names>
                                </name>
                                                                    <aff>PAMUKKALE ÜNİVERSİTESİ, EĞİTİM FAKÜLTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0003-1614-1214</contrib-id>
                                                                <name>
                                    <surname>Cinar</surname>
                                    <given-names>Deniz Baransel</given-names>
                                </name>
                                                                    <aff>PAMUKKALE ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-9313-4942</contrib-id>
                                                                <name>
                                    <surname>Nayır</surname>
                                    <given-names>Funda</given-names>
                                </name>
                                                                    <aff>AĞRI İBRAHİM ÇEÇEN ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-04-28">
                    <day>28</day>
                    <month>04</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>35</volume>
                                                            
                        <history>
                                    <date date-type="received" iso-8601-date="2025-04-28">
                        <day>28</day>
                        <month>04</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2026-01-05">
                        <day>05</day>
                        <month>01</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2013, Çukurova Üniversitesi Sosyal Bilimler Enstitüsü Dergisi</copyright-statement>
                    <copyright-year>2013</copyright-year>
                    <copyright-holder>Çukurova Üniversitesi Sosyal Bilimler Enstitüsü Dergisi</copyright-holder>
                </permissions>
            
                                                                                                                                                <trans-abstract xml:lang="tr">
                            <p>Yapay zekâ teknolojilerinin hızla gelişmesi ve yaygınlaşması, bu teknolojilerin etik ilkelerle uyumlu biçimde geliştirilmesini zorunlu kılmaktadır. Bu çalışma, UNESCO, Avrupa Komisyonu, OECD, Avrupa Parlamentosu ve Avrupa Konseyi tarafından yayımlanan beş büyük uluslararası yapay zekâ etiği raporunu karşılaştırmalı olarak incelemektedir. Doküman analizi ve tematik kodlama yöntemiyle yürütülen araştırmada, şeffaflık, sorumluluk ve güvenilirlik ilkelerinin tüm raporlarda ortak biçimde vurgulandığı tespit edilmiştir. Bulgular, kuruluşların ortak etik değerlerde uzlaşırken, bu ilkelerin uygulanmasında önemli farklılıklar gösterdiğini ortaya koymaktadır. UNESCO eğitim ve kapasite geliştirmeyle kültürel uyarlama; Avrupa Komisyonu ve Parlamentosu bağlayıcı yasal düzenlemeler; OECD esnek işbirliği; Avrupa Konseyi ise insan hakları hukuku yoluyla etik uygulamayı hayata geçirmektedir. Bu farklılaşma, çalışmanın temel kavramı olan “etik uygulama boşluğunu”, başka bir ifade ile “yaygın kabul gören ilkeler ile gerçek uygulamalar arasındaki açığı” doğurmaktadır. Çalışma, yapay zekâ etiğindeki güncel zorlukların paylaşılan ilkelerin eksikliğinden değil, bu ilkelerin hayata geçirilmesine yönelik uyumsuz mekanizmalardan kaynaklandığını göstermektedir. Bu uygulama boşluğunu analitik olarak açıklayarak, araştırma yapay zekâ etiği literatürünü ilke belirlemenin ötesine taşımakta ve politika yapıcılar ile uygulayıcılar için eylem odaklı öneriler sunmaktadır.</p></trans-abstract>
                                                                                                                                    <abstract><p>The rapid development and widespread adoption of artificial intelligence technologies necessitate their alignment with ethical principles throughout design and deployment processes. This study comparatively examines five major international reports on AI ethics published by UNESCO, the European Commission, the OECD, the European Parliament, and the Council of Europe. Employing document analysis and thematic coding methods, the research identifies transparency, responsibility, and trustworthiness as core ethical principles commonly emphasized across these frameworks. However, the findings reveal a fundamental pattern: while organizations demonstrate normative convergence around shared ethical values, they exhibit operational divergence in how these principles are framed, justified, and institutionalized within distinct governance contexts. This produces an ethical implementation gap, the distance between widely endorsed norms and their practical realization in policy frameworks. UNESCO pursues culturally adaptive norms through education and capacity building; the European Commission and European Parliament enforce compliance through binding regulations; OECD coordinates flexible implementation across member states; and the Council of Europe anchors AI ethics within human rights law. These divergent governance logics reflect deeper philosophical disagreements about whether ethical AI should be realized through voluntary norms, technical standards, binding legislation, or rights-based frameworks. The study demonstrates that contemporary challenges in AI ethics stem not from a lack of shared principles but from incompatible mechanisms for their operationalization. 
By analytically unpacking this implementation gap, the research advances AI ethics scholarship beyond principle identification and provides actionable insights for policymakers and practitioners.</p></abstract>
                                                            
            
                                                                                                                                                <kwd-group>
                                                    <kwd>Artificial intelligence ethics</kwd>
                                                    <kwd>Ethical implementation gap</kwd>
                                                    <kwd>Governance logics</kwd>
                                                    <kwd>International policy frameworks</kwd>
                                                    <kwd>Transparency</kwd>
                                                    <kwd>Accountability</kwd>
                                                    <kwd>Trustworthiness</kwd>
                                            </kwd-group>
                            
                                                                                                        <kwd-group xml:lang="tr">
                                                    <kwd>Yapay Zeka Etiği</kwd>
                                                    <kwd>Etik uygulama boşluğu</kwd>
                                                    <kwd>Yönetişim yaklaşımları</kwd>
                                                    <kwd>Uluslararası politika çerçeveleri</kwd>
                                                    <kwd>Şeffaflık</kwd>
                                                    <kwd>Hesap verebilirlik</kwd>
                                                    <kwd>Güvenilirlik</kwd>
                                            </kwd-group>
                                                                                                                                        </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Acemoglu, D., &amp; Restrepo, P. (2020). Robots and jobs: Evidence from US labor markets.  Journal of Political Economy, 128(6), 2188-2244. https://doi.org/10.1086/705716</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Allen, C., Wallach, W., &amp; Smit, I. (2006). Why machine ethics? IEEE Intelligent Systems, 21(4), 12-17. https://doi.org/10.1109/MIS.2006.83</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Anderson, M., &amp; Anderson, S. L. (2007). Machine ethics: Creating an ethical intelligent agent. AI Magazine, 28(4), 15-26. https://doi.org/10.1609/aimag.v28i4.2065</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Batool, A., Lee, S., Liu, Y., &amp; Dong, L. (2026). The anatomy of AI policies: a systematic comparative analysis of AI policies across the globe. AI and Ethics 6, 55. https://doi.org/10.1007/s43681-025-00886-3</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Batool, A., Zowghi, D. &amp; Bano, M. (2025). AI governance: a systematic literature review. AI and Ethics 5, 3265-3279. https://doi.org/10.1007/s43681-024-00653-w</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Binns, R. (2018). Fairness in machine learning: Lessons from political philosophy. In Proceedings of the Conference on Fairness, Accountability and Transparency (FAT*) (pp. 149–159). PMLR.</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Bowen, G. A. (2009). Document analysis as a qualitative research method. Qualitative Research Journal, 9(2), 27-40. https://doi.org/10.3316/QRJ0902027</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Braun, V., &amp; Clarke, V. (2006). Using thematic analysis in psychology. Qualitative Research in Psychology, 3(2), 77-101. https://doi.org/10.1191/1478088706qp063oa</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Brynjolfsson, E., &amp; McAfee, A. (2014). The second machine age: Work, progress, and prosperity in a time of brilliant technologies. W. W. Norton &amp; Company.</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Buolamwini, J., &amp; Gebru, T. (2018). Gender shades: Intersectional accuracy disparities in commercial gender classification. In Proceedings of the Conference on Fairness, Accountability and Transparency (FAT*) (pp. 77–91). PMLR.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Cohen, J. (1960). A coefficient of agreement for nominal scales. Educational and Psychological Measurement, 20(1), 37-46. https://doi.org/10.1177/001316446002000104</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Corrêa, N. K., Galvão, C., Santos, J. W., Del Pino, C., Pinto, E. P., Barbosa, C., Massmann, D., Mambrini, R., Galvão, L., Terem, E., &amp; de Oliveira, N. (2023). Worldwide AI ethics: A review of 200 guidelines and recommendations for AI governance. Patterns, 4(10), 100857. https://doi.org/10.1016/j.patter.2023.100857</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Corrêa, N. K., Santos, J. W., Galvão, C., Pasetti, M., Schiavon, D., Naqvi, F., Hossain, R., &amp; de Oliveira, N. (2025). Crossing the principle-practice gap in AI ethics with ethical problem-solving. AI and Ethics 5, 1271-1288. https://doi.org/10.1007/s43681-024-00469-8</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Crawford, K. (2021). Atlas of AI: Power, politics, and the planetary costs of artificial intelligence. Yale University Press. https://doi.org/10.12987/9780300252392</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Diakopoulos, N. (2016). Accountability in algorithmic decision making. Communications of the ACM, 59(2), 56-62. https://doi.org/10.1145/2844110</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Ding, J. (2018). Deciphering China’s AI dream. Future of Humanity Institute, University of Oxford. https://cdn.governance.ai/Deciphering_Chinas_AI-Dream.pdf</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Doshi-Velez, F., &amp; Kim, B. (2017). Towards a rigorous science of interpretable machine learning. arXiv. https://doi.org/10.48550/arXiv.1702.08608</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Esteva, A., Chou, K., Yeung, S., Naik, N., Madani, A., Mottaghi, A., Liu, Y., Topol, E., Dean, J., &amp; Socher, R. (2021). Deep learning-enabled medical computer vision. NPJ Digital Medicine, 4(1), 5. https://doi.org/10.1038/s41746-020-00376-2</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">European Commission (2019). Ethics Guidelines for Trustworthy AI. https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">European Parliament and Council of the European Union. (2024). Regulation (EU) 2024/1689 of the European Parliament and of the Council of 13 June 2024 laying down harmonised rules on artificial intelligence (Artificial Intelligence Act) and amending Regulations (EC) No 300/2008, (EU) No 167/2013, (EU) No 168/2013, (EU) 2018/858, (EU) 2018/1139 and (EU) 2019/2144 and Directives 2014/90/EU, (EU) 2016/797 and (EU) 2020/1828 (Text with EEA relevance). Official Journal of the European Union. https://eur-lex.europa.eu/eli/reg/2024/1689/oj/eng</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Fox-Skelly, J., Bird, E., &amp; Jenner, N. (2020). The ethics of artificial intelligence: Issues and initiatives. European Parliament, Directorate-General for Parliamentary Research Services. https://data.europa.eu/doi/10.2861/6644</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">European Parliamentary Research Service. (2024). AI investment: EU and global indicators.  European Parliament. https://www.europarl.europa.eu/RegData/etudes/ATAG/2024/760392/EPRS_ATA(2024)760392_EN.pdf</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">European Parliamentary Research Service. (2023). China-US global rivalry and the EU (Briefing 749803). European Parliament. https://www.europarl.europa.eu/RegData/etudes/BRIE/2023/749803/EPRS_BRI(2023)749803_EN.pdf</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Floridi, L. (2014). The fourth revolution: How the infosphere is reshaping human reality. Oxford University Press.</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Floridi, L., &amp; Cowls, J. (2022). A unified framework of five principles for AI in society. In S. Carta (Ed.), Machine Learning and the City: Applications in Architecture and Urban Design (pp. 535-545). Wiley. https://doi.org/10.1002/9781119815075.ch45</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Goodfellow, I., Bengio, Y., &amp; Courville, A. (2016). Deep learning. MIT Press.</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Jobin, A., Ienca, M., &amp; Vayena, E. (2019). The global landscape of AI ethics guidelines. Nature Machine Intelligence, 1(9), 389-399. https://doi.org/10.1038/s42256-019-0088-2</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Israel, I. B., Cerdio, J., Ema, A., Friedman, L., Ienca, M., Mantelero, A., &amp; Matania, E. (2020). Towards regulation of AI systems: Global perspectives on the development of a legal framework on artificial intelligence systems based on the Council of Europe’s standards on human rights, democracy and the rule of law. Council of Europe.</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Lipton, Z. C. (2018). The mythos of model interpretability: In machine learning, the concept of interpretability is both important and slippery. Queue, 16(3), 31-57. https://doi.org/10.1145/3236386.3241340</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Luckin, R., Holmes, W., Griffiths, M., &amp; Forcier, L. B. (2016). Intelligence Unleashed: An Argument for AI in Education. Pearson.</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">McKinsey Global Institute. (2023). The economic impact of artificial intelligence. McKinsey &amp; Company. https://www.mckinsey.com</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">Merriam, S. B., &amp; Tisdell, E. J. (2016). Qualitative research: A guide to design and implementation (4th ed.). Jossey-Bass.</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">Milakis, D., Van Arem, B., &amp; Van Wee, B. (2017). Policy and society related implications of automated driving: A review of literature and directions for future research. Journal of Intelligent Transportation Systems, 21(4), 324-348. https://doi.org/10.1080/15472450.2017.1291351</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">Mittelstadt, B. D., Allo, P., Taddeo, M., Wachter, S., &amp; Floridi, L. (2016). The ethics of algorithms: Mapping the debate. Big Data &amp; Society, 3(2), 2053951716679679. https://doi.org/10.1177/2053951716679679</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">OECD. (2019). Recommendation of the council on artificial intelligence. OECD.</mixed-citation>
                    </ref>
                                    <ref id="ref36">
                        <label>36</label>
                        <mixed-citation publication-type="journal">OECD. (2023). The state of implementation of the OECD AI Principles four years on. https://www.oecd.org/en/publications/the-state-of-implementation-of-the-oecd-ai-principles-four-years-on_835641c9-en.html</mixed-citation>
                    </ref>
                                    <ref id="ref37">
                        <label>37</label>
                        <mixed-citation publication-type="journal">O’Neil, C. (2017). Weapons of math destruction: How big data increases inequality and threatens democracy. Crown.</mixed-citation>
                    </ref>
                                    <ref id="ref38">
                        <label>38</label>
                        <mixed-citation publication-type="journal">Papagiannidis, E., Enholm, I. M., Dremel, C., Mikalef, P., &amp; Krogstie, J. (2023). Toward AI governance: Identifying best practices and potential barriers and outcomes. Information Systems Frontiers, 25(1), 123–141. https://doi.org/10.1007/s10796-022-10251-y</mixed-citation>
                    </ref>
                                    <ref id="ref39">
                        <label>39</label>
                        <mixed-citation publication-type="journal">Papagiannidis, E., Mikalef, P., &amp; Conboy, K. (2025). Responsible artificial intelligence governance: A review and research framework. The Journal of Strategic Information Systems, 34(2), 101885. https://doi.org/10.1016/j.jsis.2024.101885</mixed-citation>
                    </ref>
                                    <ref id="ref40">
                        <label>40</label>
                        <mixed-citation publication-type="journal">PricewaterhouseCoopers (PwC). (2017). Sizing the prize: What’s the real value of AI for your business and how can you capitalise? https://www.pwc.com.au/government/pwc-ai-analysis-sizing-the-prize-report.pdf
Russell, S., &amp; Norvig, P. (2021). Artificial intelligence: A modern approach (4th ed.). Pearson.</mixed-citation>
                    </ref>
                                    <ref id="ref41">
                        <label>41</label>
                        <mixed-citation publication-type="journal">Sharma, S. (2024). Benefits or concerns of AI: A multistakeholder responsibility. Futures, 157, 103328. https://doi.org/10.1016/j.futures.2024.103328</mixed-citation>
                    </ref>
                                    <ref id="ref42">
                        <label>42</label>
                        <mixed-citation publication-type="journal">Siau, K., &amp; Wang, W. (2020). Artificial intelligence (AI) ethics: Ethics of AI and ethical AI. Journal of Database Management, 31(2), 74-87.  https://doi.org/10.4018/JDM.2020040105</mixed-citation>
                    </ref>
                                    <ref id="ref43">
                        <label>43</label>
                        <mixed-citation publication-type="journal">Taddeo, M., &amp; Floridi, L. (2018). How AI can be a force for good. Science, 361(6404), 751-752. https://doi.org/10.1126/science.aat5991</mixed-citation>
                    </ref>
                                    <ref id="ref44">
                        <label>44</label>
                        <mixed-citation publication-type="journal">UNESCO. (2021). Recommendation on the Ethics of Artificial Intelligence.  https://unesdoc.unesco.org/ark:/48223/pf0000381137</mixed-citation>
                    </ref>
                                    <ref id="ref45">
                        <label>45</label>
                        <mixed-citation publication-type="journal">Van Otterlo, M. (2017). From algorithmic black boxes to adaptive white boxes: Declarative decision-theoretic ethical programs as codes of ethics. arXiv. https://doi.org/10.48550/arXiv.1711.06035</mixed-citation>
                    </ref>
                                    <ref id="ref46">
                        <label>46</label>
                        <mixed-citation publication-type="journal">Whittlestone, J., Nyrup, R., Alexandrova, A., &amp; Cave, S. (2019, January). The role and limits of principles in AI ethics: Towards a focus on tensions. In Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society (pp. 195-200).</mixed-citation>
                    </ref>
                                    <ref id="ref47">
                        <label>47</label>
                        <mixed-citation publication-type="journal">Wieringa, M. (2020). What to account for when accounting for algorithms: A systematic literature review on algorithmic accountability. Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, 1-18. https://doi.org/10.1145/3351095.3372833</mixed-citation>
                    </ref>
                                    <ref id="ref48">
                        <label>48</label>
                        <mixed-citation publication-type="journal">Xiong, H., Ledwidge, M. T., Fadahunsi, K. P., Lee, H. Y., Wu, J., Morrow, S., Nisar, Y. B., Mbakaya, B., O’Donoghue, J., &amp;</mixed-citation>
                    </ref>
                                    <ref id="ref49">
                        <label>49</label>
                        <mixed-citation publication-type="journal">Gallagher, J. (2025). Global Artificial Intelligence (AI) Governance, Trust, and Ethics for Sustainable Health (GATES): A Protocol for Methodological Framework. VeriXiv, 2, 187. https://doi.org/10.12688/verixiv.1380.1</mixed-citation>
                    </ref>
                                    <ref id="ref50">
                        <label>50</label>
                        <mixed-citation publication-type="journal">Zarsky, T. (2016). The trouble with algorithmic decisions: An analytic road map to examine efficiency and fairness in automated and opaque decision making. Science, Technology, &amp; Human Values, 41(1), 118-132. https://doi.org/10.1177/0162243915605575</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
