<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                    <journal-id></journal-id>
            <journal-title-group>
                                                                                    <journal-title>Dumlupınar Üniversitesi Sosyal Bilimler Dergisi</journal-title>
            </journal-title-group>
                            <issn pub-type="ppub">1302-1842</issn>
                                        <issn pub-type="epub">2587-005X</issn>
                                                                                            <publisher>
                    <publisher-name>Kütahya Dumlupinar University</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id/>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Technology Management</subject>
                                                            <subject>Business Administration</subject>
                                                            <subject>Organisation and Management Theory</subject>
                                                            <subject>Organizational Culture</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Teknoloji Yönetimi</subject>
                                                            <subject>İşletme</subject>
                                                            <subject>Organizasyon ve Yönetim Teorisi</subject>
                                                            <subject>Örgüt Kültürü</subject>
                                                    </subj-group>
                                    </article-categories>
                <title-group>
                    <article-title>Explainability Burden and Accountability of Organizational AI Decisions: A Blockchain Based Governance Model</article-title>
                    <trans-title-group xml:lang="tr">
                        <trans-title>Örgütsel Yapay Zekâ Kararlarının Açıklanabilirlik Yükü ve Hesap Verebilirliği: Blok Zinciri Tabanlı Bir Yönetişim Modeli</trans-title>
                    </trans-title-group>
                </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-4446-4865</contrib-id>
                                                                <name>
                                    <surname>Yıldırım</surname>
                                    <given-names>Arif</given-names>
                                </name>
                                                                    <aff>ÇANAKKALE ONSEKİZ MART ÜNİVERSİTESİ İLETİŞİM FAKÜLTESİ GAZETECİLİK BÖLÜMÜ BİLİŞİM BİLİM DALI</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-04-30">
                    <day>30</day>
                    <month>04</month>
                    <year>2026</year>
                </pub-date>
                                                    <issue>89</issue>
                                        <fpage>343</fpage>
                                        <lpage>366</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2026-02-01">
                        <day>01</day>
                        <month>02</month>
                        <year>2026</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2026-04-29">
                        <day>29</day>
                        <month>04</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 1999, Dumlupınar Üniversitesi Sosyal Bilimler Dergisi</copyright-statement>
                    <copyright-year>1999</copyright-year>
                    <copyright-holder>Dumlupınar Üniversitesi Sosyal Bilimler Dergisi</copyright-holder>
                </permissions>
            
                                                                                                <trans-abstract xml:lang="tr">
                            <p>Kurumsal karar alma alanında yapay zekânın (YZ) hızla yaygınlaşması, hesap verebilirlik krizini keskinleştirmiştir. Finans, sağlık ve insan kaynaklarında YZ sistemleri daha yüksek riskli karar çıktıları üzerinde etkili olurken, paydaşlar kararların nasıl verildiğine dair şeffaflık ve hatalar karşısında daha güçlü hesap verebilirlik talep etmektedir. Ancak açıklanabilirliğe ilişkin yaklaşımlar; silo temelli sorumluluk yapıları, zayıf denetim izleri ve yeterince tanımlanmamış bir “açıklanabilirlik yükü” nedeniyle sınırlı kalmaktadır. Bu yük, YZ kararlarına ilişkin açıklamaları üretme, sürdürme, doğrulama ve güvence altına alma emeğini ifade eder. Bu kavramsal-kuramsal makale, blok zincirini değiştirilemez bir yönetişim altyapısı olarak ele alarak bu yükün paydaşlar (geliştiriciler, veri sağlayıcılar, süreç sahipleri ve denetçiler) arasında nasıl dağıtılabileceğine ilişkin bir çerçeve kurar. Algoritmik hesap verebilirlik, kurumsal kuram ve dağıtık yönetişim literatürüne dayanarak; açıklama beklentilerini risk maruziyeti ve kapasiteye göre nicelleştiren ve tahsisleri akıllı sözleşmeler aracılığıyla uygulanabilir kılan biçimsel bir model önerilmektedir. Model, açıklanabilirlik faaliyetini ölçülebilir bir yük olarak görür ve şeffaflık ile meşruiyeti güvence altına almak için tahsis edilmesi gerektiğini savunur. Blok zinciriyle sağlanan denetlenebilirlik, paydaş güveni, düzenleyici uyum ve örgütsel öğrenme arasındaki ilişkiye dair test edilebilir beş önerme geliştirir; ayrıca AB Yapay Zekâ Tüzüğü (EU AI Act), GDPR Madde 22 ve yaklaşan YZ yönetişimi rejimleri için çıkarımlar sunar ve uygulamaya dönük yön vermektedir.</p></trans-abstract>
                                                                                                                                    <abstract><p>The speed with which artificial intelligence has proliferated in organizational decision-making has intensified the accountability crisis. In finance, healthcare, and human resources, AI systems increasingly influence high-risk decision outcomes, with stakeholders demanding transparency concerning how decisions are made and accountability for failures. Yet prevailing understandings of explainability are limited by siloed responsibility structures, weak audit trails, and an under-specified “explainability burden”—the labor associated with producing, maintaining, verifying, and assuring explanations for decisions made by AI. This conceptual article builds a framework for distributing that burden among key stakeholders (AI developers, data providers, process owners, auditors) using blockchain as an immutable governance infrastructure. Building on research in algorithmic accountability, institutional theory and distributed governance, the article proposes a framework for quantifying explanation expectations as a function of risk exposure and capacity and implementing these allocations as smart contracts on blockchain platforms. The model regards explainability as a measurable burden that must be strategically allocated to ensure transparency and legitimacy. I derive five testable propositions linking blockchain-based auditability to accountability outcomes, stakeholder trust, regulatory compliance and organizational learning, and provide insights for organizations dealing with the EU AI Act, GDPR Article 22 and upcoming AI governance regimes.</p></abstract>
                                                            
            
                                                                                        <kwd-group>
                                                    <kwd>Explainable AI</kwd>
                                                    <kwd>Blockchain</kwd>
                                                    <kwd>Algorithmic accountability</kwd>
                                                    <kwd>Distributed governance</kwd>
                                                    <kwd>Responsibility allocation</kwd>
                                                    <kwd>Institutional theory</kwd>
                                            </kwd-group>
                            
                                                <kwd-group xml:lang="tr">
                                                    <kwd>Açıklanabilir yapay zeka</kwd>
                                                    <kwd>Blok zincir</kwd>
                                                    <kwd>Algoritmik hesap verebilirlik</kwd>
                                                    <kwd>Dağıtık yönetişim</kwd>
                                                    <kwd>Sorumluluk tahsisi</kwd>
                                                    <kwd>Kurumsal teori</kwd>
                                            </kwd-group>
                                                                                                                                    <funding-group specific-use="FundRef">
                    <award-group>
                                                    <funding-source>
                                <named-content content-type="funder_name">Not applicable</named-content>
                            </funding-source>
                                                                    </award-group>
                </funding-group>
                                </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Argote, L., &amp; Miron-Spektor, E. (2011). Organizational learning: From experience to knowledge. Organization Science, 22(5), 1123-1137. https://doi.org/10.1287/orsc.1100.0621</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Asif, R., Hassan, S. R., &amp; Parr, G. (2023). Integrating a blockchain-based governance framework for responsible AI. Future Internet, 15(3), 97, 2-21. https://doi.org/10.3390/fi15030097</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Atzei, N., Bartoletti, M., &amp; Cimoli, T. (2017). A survey of attacks on Ethereum smart contracts (SoK). In M. Maffei &amp; M. Ryan (Eds.), Principles of security and trust. POST 2017 (Lecture Notes in Computer Science, Vol. 10204, pp. 164-186). Springer, Berlin, Heidelberg. https://doi.org/10.1007/978-3-662-54455-6_8</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Banerjee, G., Dhar, S., Roy, S., Syed, R., &amp; Das, A. (2024). Explainability and transparency in designing responsible AI applications in the enterprise. In N. Naik, P. Jenkins, S. Prajapat, &amp; P. Grace (Eds.), Contributions presented at The International Conference on Computing, Communication, Cybersecurity and AI, July 3-4, 2024, London, UK. C3AI 2024 (Lecture Notes in Networks and Systems, Vol. 884, pp. 420-431). Springer, Cham. https://doi.org/10.1007/978-3-031-74443-3_25</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Beck, R., Müller-Bloch, C., &amp; King, J. L. (2018). Governance in the blockchain economy: A framework and research agenda. Journal of the Association for Information Systems, 19(10). https://doi.org/10.17705/1jais.00518</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Bovens, M. (2007). Analysing and assessing accountability: A conceptual framework. European Law Journal, 13, 447-468. https://doi.org/10.1111/j.1468-0386.2007.00378.x</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Buterin, V. (2014). A next-generation smart contract and decentralized application platform. Ethereum White Paper. https://ethereum.org/en/whitepaper/</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Butt, U. A., Amin, R., Aldabbas, H., Mehmood, M., Shaukat, M. W., &amp; Raza, S. M. (2023). Deploying blockchains to simplify AI algorithm auditing. In 2023 IEEE 8th International Conference on Engineering Technologies and Applied Sciences (ICETAS), Bahrain (pp. 1-6). https://doi.org/10.1109/ICETAS59148.2023.10346420</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Christidis, K., &amp; Devetsikiotis, M. (2016). Blockchains and smart contracts for the Internet of Things. IEEE Access, 4, 2292-2303. https://doi.org/10.1109/ACCESS.2016.2566339</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Croman, K., Decker, C., Eyal, I., Gencer, A. E., Juels, A., Kosba, A., Miller, A., Saxena, P., Shi, E., Sirer, E. G., Song, D., &amp; Wattenhofer, R. (2016). On scaling decentralized blockchains. In J. Clark, S. Meiklejohn, P. Ryan, D. Wallach, M. Brenner, &amp; K. Rohloff (Eds.), Financial cryptography and data security. FC 2016 (Lecture Notes in Computer Science, Vol. 9604, pp. 106-125). Springer, Berlin, Heidelberg. https://doi.org/10.1007/978-3-662-53357-4_8</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">De Bruijn, H., Warnier, M., &amp; Janssen, M. (2022). The perils and pitfalls of explainable AI: Strategies for explaining algorithmic decision-making. Government Information Quarterly, 39(2), 101666, 1-8. https://doi.org/10.1016/j.giq.2021.101666</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">De Filippi, P., &amp; Loveluck, B. (2016). The invisible politics of Bitcoin: Governance crisis of a decentralized infrastructure. Internet Policy Review, 5(3), 1-28. https://doi.org/10.14763/2016.3.427</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Diakopoulos, N. (2016). Accountability in algorithmic decision making. Communications of the ACM, 59(2), 56-62. https://doi.org/10.1145/2844110</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">DiMaggio, P. J., &amp; Powell, W. W. (1983). The iron cage revisited: Institutional isomorphism and collective rationality in organizational fields. American Sociological Review, 48(2), 147-160. https://doi.org/10.2307/2095101</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Ehsan, U., &amp; Riedl, M. O. (2020). Human-centered explainable AI: Towards a reflective sociotechnical approach. In C. Stephanidis, M. Kurosu, H. Degen, &amp; L. Reinerman-Jones (Eds.), HCI International 2020—Late breaking papers: Multimodality and intelligence. HCII 2020 (Lecture Notes in Computer Science, Vol. 12424, pp. 449-466). Springer, Cham. https://doi.org/10.1007/978-3-030-60117-1_33</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">European Commission. (2021). Proposal for a regulation laying down harmonized rules on artificial intelligence (Artificial Intelligence Act). COM(2021) 206 final. https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:52021PC0206</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">European Parliament &amp; Council. (2016). Regulation (EU) 2016/679 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data (General Data Protection Regulation). Official Journal of the European Union, L119/1. https://eur-lex.europa.eu/eli/reg/2016/679/oj</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Gebru, T., Morgenstern, J., Vecchione, B., Wortman Vaughan, J., Wallach, H., Daumé III, H., &amp; Crawford, K. (2021). Datasheets for datasets. Communications of the ACM, 64(12), 86-92. https://doi.org/10.1145/3458723</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Gerlings, J. (2025). The relevance of explainable artificial intelligence (xAI) in high-risk decisions. Copenhagen Business School [PhD]. PhD Series No. 35.2025 https://doi.org/10.22439/phd.35.2025</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Jarsania, P., Kumar, S., &amp; Patel, R. (2025). TranspareGov-AI: A multi-stakeholder framework for auditable algorithmic decision-making in business processes. In 2025 IEEE International Conference on Artificial Intelligence for Learning and Optimization (ICoAILO), Bali, Indonesia (pp. 332-337). https://doi.org/10.1109/ICoAILO66760.2025.11156056</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Kroll, J. A., Huey, J., Barocas, S., Felten, E. W., Reidenberg, J. R., Robinson, D. G., &amp; Yu, H. (2017). Accountable algorithms. University of Pennsylvania Law Review, 165, 633-705. https://scholarship.law.upenn.edu/penn_law_review/vol165/iss3/3</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Lage, I., Chen, E., He, J., Narayanan, M., Kim, B., Gershman, S. J., &amp; Doshi-Velez, F. (2019). Human evaluation of models built for interpretability. Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, 7(1), 59-67. https://doi.org/10.1609/hcomp.v7i1.5280</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">Lipton, Z. C. (2018). The mythos of model interpretability. Communications of the ACM, 61(10), 36-43. https://doi.org/10.1145/3233231</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Lumineau, F., Wang, W., &amp; Schilke, O. (2021). Blockchain governance—A new way of organizing collaborations? Organization Science, 32(2), 500-521. https://doi.org/10.1287/orsc.2020.1379</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Lundberg, S. M., &amp; Lee, S. I. (2017). A unified approach to interpreting model predictions. Advances in Neural Information Processing Systems, 30, 4765-4774. https://proceedings.neurips.cc/paper/2017/hash/8a20a8621978632d76c43dfd28b67767-Abstract.html</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Miller, T. (2019). Explanation in artificial intelligence: Insights from the social sciences. Artificial Intelligence, 267, 1-38. https://doi.org/10.1016/j.artint.2018.07.007</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Nakamoto, S. (2008). Bitcoin: A peer-to-peer electronic cash system. Bitcoin.org (pp. 1-9). https://bitcoin.org/bitcoin.pdf</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Nissenbaum, H. (1996). Accountability in a computerized society. Science and Engineering Ethics, 2(1), 25-42. https://doi.org/10.1007/BF02639315</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Ostrom, E. (2010). Beyond markets and states: Polycentric governance of complex economic systems. American Economic Review, 100(3), 641-672. https://doi.org/10.1257/aer.100.3.641</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Parlak, B. (2025). Blockchain-assisted explainable decision traces (BAXDT): An approach for transparency and accountability in artificial intelligence systems. Knowledge-Based Systems, 307, 114402, 1-17. https://doi.org/10.1016/j.knosys.2025.114402</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">Raji, I. D., Smart, A., White, R. N., Mitchell, M., Gebru, T., Hutchinson, B., &amp; Barnes, P. (2020). Closing the AI accountability gap: Defining an end-to-end framework for internal algorithmic auditing. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT’20). Association for Computing Machinery, New York, NY, USA (pp. 33-44). https://doi.org/10.1145/3351095.3372873</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">Ribeiro, M. T., Singh, S., &amp; Guestrin, C. (2016). Why should I trust you?: Explaining the predictions of any classifier. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD’16). Association for Computing Machinery, New York, NY, USA (pp. 1135-1144). https://doi.org/10.1145/2939672.2939778</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">Rudin, C. (2019). Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1, 206-215. https://doi.org/10.1038/s42256-019-0048-x</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">Scott, W. R. (2014). Institutions and organizations: Ideas, interests, and identities (4th ed.). SAGE Publications.</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">Sculley, D., Holt, G., Golovin, D., Davydov, E., Phillips, T., Ebner, D., Chaudhary, V., Young, M., Crespo, J.-F., &amp; Dennison, D. (2015). Hidden technical debt in machine learning systems. Advances in Neural Information Processing Systems, 28, 2503-2511. Curran Associates, Inc. https://proceedings.neurips.cc/paper/2015/hash/86df7dcfd896fcaf2674f757a2463eba-Abstract.html</mixed-citation>
                    </ref>
                                    <ref id="ref36">
                        <label>36</label>
                        <mixed-citation publication-type="journal">Shokri, R., Stronati, M., Song, C., &amp; Shmatikov, V. (2017). Membership inference attacks against machine learning models. In 2017 IEEE Symposium on Security and Privacy (SP), San Jose, CA, USA (pp. 3-18). https://doi.org/10.1109/SP.2017.41</mixed-citation>
                    </ref>
                                    <ref id="ref37">
                        <label>37</label>
                        <mixed-citation publication-type="journal">Suchman, M. C. (1995). Managing legitimacy: Strategic and institutional approaches. The Academy of Management Review, 20(3), 571-610. https://doi.org/10.2307/258788</mixed-citation>
                    </ref>
                                    <ref id="ref38">
                        <label>38</label>
                        <mixed-citation publication-type="journal">Szabo, N. (1997). Formalizing and securing relationships on public networks. First Monday, 2(9). https://doi.org/10.5210/fm.v2i9.548</mixed-citation>
                    </ref>
                                    <ref id="ref39">
                        <label>39</label>
                        <mixed-citation publication-type="journal">Tramèr, F., Zhang, F., Juels, A., Reiter, M. K., &amp; Ristenpart, T. (2016). Stealing machine learning models via prediction APIs. In 25th USENIX Security Symposium (pp. 601-618). USENIX Association. https://www.usenix.org/conference/usenixsecurity16/technical-sessions/presentation/tramer</mixed-citation>
                    </ref>
                                    <ref id="ref40">
                        <label>40</label>
                        <mixed-citation publication-type="journal">Wachter, S., Mittelstadt, B., &amp; Floridi, L. (2017). Why a right to explanation of automated decision-making does not exist in the general data protection regulation. International Data Privacy Law, 7(2), 76-99. https://doi.org/10.1093/idpl/ipx005</mixed-citation>
                    </ref>
                                    <ref id="ref41">
                        <label>41</label>
                        <mixed-citation publication-type="journal">Wieringa, M. (2020). What to account for when accounting for algorithms: A systematic literature review on algorithmic accountability. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT ’20). Association for Computing Machinery, New York, NY, USA (pp. 1-18). https://doi.org/10.1145/3351095.3372833</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
