<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article article-type="research-article" dtd-version="1.4">
    <front>
        <journal-meta>
            <journal-id>jobig</journal-id>
            <journal-title-group>
                <journal-title>Journal of Business Innovation and Governance</journal-title>
            </journal-title-group>
            <issn pub-type="epub">2717-9559</issn>
            <publisher>
                <publisher-name>İzmir Katip Çelebi Üniversitesi</publisher-name>
            </publisher>
        </journal-meta>
        <article-meta>
            <article-id pub-id-type="doi">10.54472/jobig.1821679</article-id>
            <article-categories>
                <subj-group xml:lang="en">
                    <subject>Policy and Administration (Other)</subject>
                </subj-group>
                <subj-group xml:lang="tr">
                    <subject>Politika ve Yönetim (Diğer)</subject>
                </subj-group>
            </article-categories>
            <title-group>
                <article-title>HALLUCINATION IN LLM-BASED EDUCATIONAL TOOLS: RISKS AND SOLUTIONS FOR RELIABLE LEARNING</article-title>
                <trans-title-group xml:lang="tr">
                    <trans-title>LLM Tabanlı Eğitim Araçlarında Halüsinasyonlar: Güvenilir Öğrenme İçin Riskler ve Çözüm Yaklaşımları</trans-title>
                </trans-title-group>
            </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">
                                        https://orcid.org/0000-0001-8672-0612</contrib-id>
                                                                <name>
                                    <surname>Peltekova</surname>
                                    <given-names>Elitsa</given-names>
                                </name>
                                                                    <aff>Sofia University St. Kliment Ohridski</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">
                                        https://orcid.org/0000-0002-4841-2245</contrib-id>
                                                                <name>
                                    <surname>Miteva</surname>
                                    <given-names>Dafinka</given-names>
                                </name>
                                                                    <aff>Sofia University St. Kliment Ohridski</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">
                                        https://orcid.org/0000-0003-1355-7433</contrib-id>
                                                                <name>
                                    <surname>Patias</surname>
                                    <given-names>Ioannis</given-names>
                                </name>
                                                                    <aff>Sofia University St. Kliment Ohridski</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="20260308">
                    <day>03</day>
                    <month>08</month>
                    <year>2026</year>
                </pub-date>
            <volume>9</volume>
            <issue>1</issue>
            <history>
                <date date-type="received" iso-8601-date="20251111">
                    <day>11</day>
                    <month>11</month>
                    <year>2025</year>
                </date>
                <date date-type="accepted" iso-8601-date="20260227">
                    <day>27</day>
                    <month>02</month>
                    <year>2026</year>
                </date>
            </history>
            <permissions>
                <copyright-statement>Copyright © 2026, Journal of Business Innovation and Governance</copyright-statement>
                <copyright-year>2026</copyright-year>
                <copyright-holder>Journal of Business Innovation and Governance</copyright-holder>
            </permissions>
            
                                                                                                <trans-abstract xml:lang="tr">
                            <p>Büyük Dil Modelleri (LLM&#039;ler), kişiselleştirilmiş öğretim, içerik üretimi ve akıllı geri bildirimler sunarak eğitim alanında hızla dönüştürücü araçlar olarak ortaya çıkmıştır. Ancak bu modellerin yaygın biçimde benimsenmesi, sıklıkla kritik bir sınırlamayla engellenmektedir: halüsinasyonlar—gerçekmiş gibi görünen ancak gerçekte hatalı çıktılar. Eğitim bağlamlarında bu tür yanlışlıklar öğrencileri yanıltabilir, güveni zedeleyebilir ve pedagojik bütünlüğü tehlikeye atabilir.Bu makale, LLM tabanlı eğitim araçlarında halüsinasyonları azaltmaya yönelik olarak girdi, model ve çıktı düzeylerinde müdahaleleri kapsayan yapılandırılmış, üç katmanlı bir çerçeve önermektedir.Girdi düzeyinde, kullanıcı komutlarının kalitesi ve netliği artırılarak halüsinasyon riski azaltılır. Komut mühendisliği, müfredata uyumlu materyallerle bağlamsal temellendirme ve girdi doğrulama gibi teknikler, modelin daha kesin ve ilgili sorgular almasını sağlar. Bu katman, belirsizliği azaltmak için kullanıcı niyeti tespitinin ve alana özgü bağlam entegrasyonunun önemini vurgular.Model düzeyinde, LLM&#039;nin içsel akıl yürütmesi ve olgusal temellendirmesinin güçlendirilmesine odaklanılır. Geri çağırma ile zenginleştirilmiş üretim (RAG), seçilmiş eğitim veri kümeleriyle ince ayar ve sembolik kısıtlamalar veya mantıksal katmanlar gibi yöntemlerin etkinliği incelenir.Çıktı düzeyinde, öğrencilere sunulmadan önce içeriklerin olgusal doğruluğunu sağlamak amacıyla üretim sonrası doğrulama stratejileri önerilmektedir. Bunlar arasında otomatik doğrulama modülleri, güven puanlaması, insan denetimi sistemleri ve atıf oluşturma ile gerekçe izleme gibi açıklanabilirlik özellikleri yer alır.Halüsinasyon azaltma stratejilerini bu üç katmanda birleştiren bu çerçeve, geliştiriciler, eğitimciler ve araştırmacılar için LLM’lerin eğitim ortamlarında sorumlu bir biçimde uygulanmasına yönelik kapsamlı bir yol haritası sunmaktadır. Makale, uygulama zorluklarını ve zaman içinde halüsinasyon kalıplarından öğrenebilen uyarlanabilir sistemlerin potansiyelini de tartışarak sonlanmaktadır. Bu katmanlı yaklaşım, yalnızca eğitimde LLM’lerin güvenilirliğini artırmakla kalmaz, aynı zamanda yapay zekâ güvenliği ve güvenilirliğine yönelik daha geniş çabalara da katkı sağlar.</p></trans-abstract>
                                                                                                                                    <abstract><p>Large Language Models (LLMs) have rapidly emerged as transformative tools in education, offering personalized tutoring, content generation, and intelligent feedback. However, their widespread adoption is often restricted by a critical limitation: hallucinations, sounding true but factually incorrect outputs. In educational contexts, such inaccuracies can mislead learners, undermine trust, and compromise pedagogical integrity. This paper proposes a structured, three-layered framework for mitigating hallucinations in LLM-based educational tools, encompassing interventions at the input, model, and output levels.   At the Input Level, hallucination risks are addressed by refining the quality and clarity of user prompts. Techniques such as prompt engineering, contextual grounding with curriculum-aligned materials, and input validation are explored to ensure that the model receives precise and relevant queries. This layer emphasizes the importance of user intent detection and the integration of domain-specific context to reduce ambiguity.   The Model Level focuses on enhancing the internal reasoning and factual grounding of the LLM itself. We examine the efficacy of retrieval-augmented generation (RAG), fine-tuning with curated educational datasets, and the application of symbolic constraints or logic overlays.     At the Output Level, we propose post-generation verification strategies to ensure factual accuracy before content is delivered to learners. These include automated fact-checking modules, confidence scoring, human-in-the-loop review systems, and explainability features such as citation generation and rationale tracing. This layer acts as a final safeguard, ensuring that only validated and pedagogically sound content reaches the end user.By organizing hallucination mitigation strategies across these three layers, this framework provides a comprehensive roadmap for developers, educators, and researchers seeking to deploy LLMs responsibly in educational environments. The paper concludes with a discussion of implementation challenges, and evaluation metrics including the potential for adaptive systems that learn from hallucination patterns over time. This layered approach not only enhances the reliability of LLMs in education but also contributes to broader efforts in AI safety and trustworthiness.</p></abstract>
                                                            
            
            <kwd-group>
                <kwd>Large Language Models (LLMs)</kwd>
                <kwd>Educational Technology</kwd>
                <kwd>AI Hallucinations</kwd>
                <kwd>Retrieval-Augmented Generation</kwd>
                <kwd>AI Safety in Education</kwd>
            </kwd-group>
                            
                                                <kwd-group xml:lang="tr">
                                                    <kwd>Büyük Dil Modelleri (LLM)</kwd>
                                                    <kwd>  Eğitim Teknolojisi</kwd>
                                                    <kwd>  Yapay Zekâ Halüsinasyonları</kwd>
                                                    <kwd>  Geri Çağırma ile Zenginleştirilmiş Üretim (RAG)</kwd>
                                                    <kwd>  Eğitimde Yapay Zekâ Güvenliği</kwd>
                                            </kwd-group>
                                                                                                                                        </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Alhafni, B., Vajjala, S., Bannò, S., Maurya, K. K., and Kochmar, E. (2024, 18 September). LLMs in Education: Novel Perspectives, Challenges, and Opportunities. https://arxiv.org/abs/2409.11917 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Benedetto, L., Aradelli, G., Donvito, A., Lucchetti, A., Cappelli, A., and Buttery, P. (2024). Using LLMs to simulate students’ responses to exam questions. In Y. Al-Onaizan, M. Bansal, &amp; Y.-N. Chen (Eds.), Findings of the Association for Computational Linguistics: EMNLP 2024 (pp. 11351–11368). Association for Computational Linguistics. https://doi.org/10.18653/v1/2024.findings-emnlp.663</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Brown, N. B. (2024, 4 June). Enhancing Trust in LLMs: Algorithms for Comparing and Interpreting LLMs. https://arxiv.org/abs/2406.01943 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Chen, B., Zhang, Z., Langrené, N., and Zhu, S. (2025, 13 June). Unleashing the potential of prompt engineering for large language models. Patterns, 6(6), 101260. https://doi.org/10.1016/j.patter.2025.101260 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Chen, Z. Z., Ma, J., Zhang, X., Hao, N., Yan, A., Nourbakhsh, A., Yang, X., McAuley, J., Petzold, L., and Wang, W. Y. (2024, 2 May). A Survey on Large Language Models for Critical Societal Domains: Finance, Healthcare, and Law. https://arxiv.org/abs/2405.01769 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Cheng, M., Luo, Y., Ouyang, J., Liu, Q., Liu, H., Li, L., Yu, S., Zhang, B., Cao, J., Ma, J., Wang, D., and Chen, E. (2025, 11 March). A Survey on Knowledge-Oriented Retrieval-Augmented Generation. https://arxiv.org/abs/2503.10677 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Cossio, M. (2025, 3 August). A comprehensive taxonomy of hallucinations in Large Language Models. https://arxiv.org/abs/2508.01781 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Dennison, D. V., Ahtisham, B., Chourasia, K., Arora, N., Singh, R., Kizilcec, R. F., Nambi, A., Ganu, T., and Vashistha, A. (2025, 1 July). Teacher-AI Collaboration for Curating and Customizing Lesson Plans in Low-Resource Schools. https://arxiv.org/abs/2507.00456 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Jančařík, A., and Dušek, O. (2024, 23 October). The Problem of AI Hallucination and How to Solve It. European Conference on E-Learning, 23, 122–128. https://doi.org/10.34190/ecel.23.1.2584 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Kim, Y., Jeong, H., Chen, S., Li, S. S., Lu, M., Alhamoud, K., Mun, J., Grau, C., Jung, M., Gameiro, R., Fan, L., Park, E., Lin, T., Yoon, J., Yoon, W., Sap, M., Tsvetkov, Y., Liang, P., Xu, X., and Breazeal, C. (2025, 2 November). Medical Hallucinations in Foundation Models and Their Impact on Healthcare. https://arxiv.org/abs/2503.05777 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Lee, H., Yoon, S., Won, Y., Oh, H., Kim, G., Bui, T., Dernoncourt, F., Stengel-Eskin, E., Bansal, M., and Seo, M. (2025, 18 June). Context-Informed Grounding Supervision. https://arxiv.org/abs/2506.15480 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Magesh, V., Surani, F., Dahl, M., Suzgun, M., Manning, C. D., and Ho, D. E. (2024). Hallucination-Free? Assessing the Reliability of Leading AI Legal Research Tools. https://arxiv.org/abs/2405.20362</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Papagiannopoulos, I., Koutalidis, H., Rempi, P., Ntanos, C., and Askounis, D. (2025, 23 July). Comparison of explainability methods for hallucination analysis in LLMs [version 1; peer review: 1 not approved]. Open Research Europe, 5(191). https://doi.org/10.12688/openreseurope.20839.1 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Patias, I. (2025, 12 August). Foundational Models as General-Purpose Technology: A Guide to Corporate Transformation. 2025 9th International Symposium on Innovative Approaches in Smart Technologies (ISAS), 1–6. https://doi.org/10.1109/ISAS66241.2025.11101869(Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Patias, I., Miteva, D., and Peltekova, E. (2026). Using Old Lessons for New AI – A Trainer for Project Risk Management. In G. De Tré, S. Sotirov, J. Kacprzyk, G. Psaila, G. Smits, T. Andreasen, G. Bordogna, &amp; H. Legind Larsen (Eds.), Flexible Query Answering Systems (pp. 155–167). Springer Nature Switzerland.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Patias, I., Miteva, D., Peltekova, E., Wright, M., and Gasteiger-Klicpera, B. (2024, 6 December). Leveraging Large Language Models to Enhance Mental Health Literacy and Diversity Awareness in Adolescents: The me_HeLi-D Project. 2024 8th International Symposium on Innovative Approaches in Smart Technologies (ISAS), 1–5. https://doi.org/10.1109/ISAS64331.2024.10845582 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Qamar, M. T., Yasmeen, J., Pathak, S. K., Sohail, S. S., Madsen, D. Ø., and Rangarajan, M. (2024, 18 June). Big claims, low outcomes: Fact checking ChatGPT’s efficacy in handling linguistic creativity and ambiguity. Cogent Arts &amp; Humanities, 11(1), 2353984. https://doi.org/10.1080/23311983.2024.2353984 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Rahman, S. S., Islam, M. A., Alam, M. M., Zeba, M., Rahman, M. A., Chowa, S. S., Raiaan, M. A. K., and Azam, S. (2025, 26 September). Hallucination to Truth: A Review of Fact-Checking and Factuality Evaluation in Large Language Models. https://arxiv.org/abs/2508.03860 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Seth, P., &amp; Sankarapu, V. K. (2025, 20 November). Bridging the Gap in XAI-Why Reliable Metrics Matter for Explainability and Compliance. https://arxiv.org/abs/2502.04695 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Shahzad, T., Mazhar, T., Tariq, M. U., Ahmad, W., Ouahada, K., and Hamam, H. (2025, 14 January). A comprehensive review of large language models: Issues and solutions in learning environments. Discover Sustainability, 6(1), 27. https://doi.org/10.1007/s43621-025-00815-8 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Wang, H., Fu, W., Tang, Y., Chen, Z., Huang, Y., Piao, J., Gao, C., Xu, F., Jiang, T., and Li, Y. (2025, 16 January). A Survey on Responsible LLMs: Inherent Risk, Malicious Use, and Mitigation Strategy. https://arxiv.org/abs/2501.09431 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Xu, H., Gan, W., Qi, Z., Wu, J., and Yu, P. S. (2024, 12 May). Large Language Models for Education: A Survey. https://arxiv.org/abs/2405.13001 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">Zhou, J., Zhang, J., Wan, R., Cui, X., Liu, Q., Guo, H., Shi, X., Fu, B., Meng, J., Yue, B., Zhang, Y., and Zhang, Z. (2025, 19 March). Integrating AI into clinical education: Evaluating general practice trainees’ proficiency in distinguishing AI-generated hallucinations and impacting factors. BMC Medical Education, 25(1), 406. https://doi.org/10.1186/s12909-025-06916-2 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Zhou, K. Z., Kilhoffer, Z., Sanfilippo, M. R., Underwood, T., Gumusel, E., Wei, M., Choudhry, A., and Xiong, J. (2024, 23 January). “The teachers are confused as well”: A Multiple-Stakeholder Ethics Discussion on Large Language Models in Computing Education. https://arxiv.org/abs/2401.12453 (Access Date, 30 November 2025)</mixed-citation>
                    </ref>
                            </ref-list>
    </back>
</article>
