<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article article-type="research-article" dtd-version="1.4" xml:lang="en">
            <front>

                <journal-meta>
                                                                <journal-id journal-id-type="publisher-id">job</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Journal of Baltalimanı</journal-title>
            </journal-title-group>
                                        <issn pub-type="epub">3108-4591</issn>
                                                                                            <publisher>
                    <publisher-name>İstanbul Metin Sabancı Baltalimanı Kemik Hastalıkları Eğitim ve Araştırma Hastanesi</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.5281/zenodo.18998621</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Orthopaedics</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Ortopedi</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Evaluation of ChatGPT&#039;s Performance in Residency Training Progress Exams and Competency Exams in Orthopedics and Traumatology</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="tr">
                                    <trans-title>Ortopedi ve Travmatoloji’de Uzmanlık Eğitimi Gelişim Sınavları ve Yeterlik Sınavlarında ChatGPT’nin Performansının Değerlendirilmesi</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-6576-1802</contrib-id>
                                                                <name>
                                    <surname>Dinçel</surname>
                                    <given-names>Yaşar Mahsut</given-names>
                                </name>
                                                                    <aff>TEKIRDAG NAMIK KEMAL UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-1077-4945</contrib-id>
                                                                <name>
                                    <surname>Kutluay</surname>
                                    <given-names>Gündüz Ercan</given-names>
                                </name>
                                                                    <aff>ÇOSB Kapaklı Devlet Hastanesi</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-6236-4123</contrib-id>
                                                                <name>
                                    <surname>Sasanı</surname>
                                    <given-names>Hadi</given-names>
                                </name>
                                                                    <aff>TEKIRDAG NAMIK KEMAL UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-9743-5515</contrib-id>
                                                                <name>
                                    <surname>Erem</surname>
                                    <given-names>Murat</given-names>
                                </name>
                                                                    <aff>TRAKYA UNIVERSITY</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="epub" iso-8601-date="2026-03-30">
                    <day>30</day>
                    <month>03</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>2</volume>
                                        <issue>1</issue>
                                        <fpage>14</fpage>
                                        <lpage>19</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2026-02-06">
                        <day>06</day>
                        <month>02</month>
                        <year>2026</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2026-03-04">
                        <day>04</day>
                        <month>03</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2025, Baltalimanı Dergisi</copyright-statement>
                    <copyright-year>2025</copyright-year>
                    <copyright-holder>Baltalimanı Dergisi</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Background: Artificial intelligence (AI) technologies have rapidly expanded into the field of medical education, offering innovative tools for training and assessment. This study aimed to evaluate the performance of the ChatGPT-3.5 language model in the “Residency Training Progress Examination” (UEGS) and the “Competency Examination” administered by the Turkish Society of Orthopedics and Traumatology (TOTBID). The objective was to determine whether ChatGPT performs comparably to orthopedic residents and whether it can achieve a passing score in the Competency Exam. Methods: A total of 2,000 UEGS and 1,000 Competency Exam questions (2012–2023, excluding 2020) were presented to ChatGPT-3.5 using standardized prompts designed within the Role–Goals–Context (RGC) framework. The model’s responses were statistically compared with those of orthopedic residents and specialists using the Mann–Whitney U and Kruskal–Wallis tests (p &lt; 0.05). Results: ChatGPT achieved the highest accuracy in the General Orthopedics category (62%) and the lowest in Adult Reconstructive Surgery (40%). It outperformed residents only in the Spine Surgery category (p &lt; 0.05). In the Competency Exams, ChatGPT passed four of ten exams. Conclusion: ChatGPT-3.5 demonstrated limited reliability and accuracy in orthopedic examinations and should be used cautiously as an educational support tool. Future studies involving newer multimodal versions of large language models may clarify their potential role in medical education and assessment.</p></abstract>
                                                                                                                                    <trans-abstract xml:lang="tr">
                            <p>Amaç: Yapay zekâ (YZ) teknolojileri, eğitim ve değerlendirmede yenilikçi araçlar sunarak tıp eğitimi alanında hızla yaygınlaşmaktadır. Bu çalışmanın amacı, ChatGPT-3.5 dil modelinin Türk Ortopedi ve Travmatoloji Birliği (TOTBİD) tarafından uygulanan Uzmanlık Eğitimi Gelişim Sınavı (UEGS) ve Yeterlik Sınavı’ndaki performansını değerlendirmektir. Çalışmada, ChatGPT’nin ortopedi asistanlarıyla karşılaştırılabilir bir performans sergileyip sergilemediği ve Yeterlik Sınavı’nda geçme başarısı gösterip gösteremediği araştırılmıştır. Yöntemler: 2012–2023 yılları arasına ait (2020 yılı hariç) toplam 2.000 UEGS ve 1.000 Yeterlik Sınavı sorusu, Rol–Amaçlar–Bağlam (RGC) çerçevesinde oluşturulan standartlaştırılmış istemler (promptlar) kullanılarak ChatGPT-3.5’e sunuldu. Modelin yanıtları, ortopedi asistanları ve uzmanlarının sonuçlarıyla Mann–Whitney U ve Kruskal–Wallis testleri kullanılarak istatistiksel olarak karşılaştırıldı (p &lt; 0,05). Bulgular: ChatGPT en yüksek doğruluk oranını Genel Ortopedi alanında (%62), en düşük doğruluk oranını ise Erişkin Rekonstrüktif Cerrahi alanında (%40) gösterdi. Sadece Omurga Cerrahisi alanında asistanlardan daha yüksek performans sergiledi (p &lt; 0,05). Yeterlik Sınavlarında ise ChatGPT, on sınavın dördünde geçme başarısı gösterdi. Sonuç: ChatGPT-3.5, ortopedi sınavlarında sınırlı güvenilirlik ve doğruluk göstermiştir ve bir eğitim destek aracı olarak temkinli kullanılmalıdır. Gelecekte, daha yeni ve çok modlu büyük dil modellerini içeren çalışmalar, bu tür sistemlerin tıp eğitimindeki ve değerlendirmedeki potansiyel rolünü daha net ortaya koyabilir.</p></trans-abstract>
                                                            
            
                                                            <kwd-group>
                                                    <kwd>ChatGPT</kwd>
                                                    <kwd>Board Examination</kwd>
                                                    <kwd>Orthopedics</kwd>
                                                    <kwd>Traumatology</kwd>
                                                    <kwd>Artificial Intelligence</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="tr">
                                                    <kwd>ChatGPT</kwd>
                                                    <kwd>Yeterlik Sınavı</kwd>
                                                    <kwd>Ortopedi</kwd>
                                                    <kwd>Yapay Zekâ</kwd>
                                            </kwd-group>
                                                                                                            </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Acaroğlu, E., Kahraman, S., Senköylü, A., Berk, H., Caner, H., Özkan, S., ... (2014). Core curriculum (CC) of spinal surgery: A step forward in defining our profession. Acta Orthopaedica et Traumatologica Turcica, 48(5), 475–478.</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Alessandri Bonetti, M., Giorgino, R., Gallo Afflitto, G., De Lorenzi, F., &amp; Egro, F. M. (2024). How does ChatGPT perform on the Italian Residency Admission National Exam compared to 15,869 medical graduates? Annals of Biomedical Engineering, 52(4), 745–749.</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Aljindan, F. K., Al Qurashi, A. A., Albalawi, I. A. S., Alanazi, A. M. M., Aljuhani, H. A. M., Falah Almutairi, F., ... (2023). ChatGPT conquers the Saudi Medical Licensing Exam: Exploring the accuracy of artificial intelligence in medical knowledge assessment and implications for modern medical education. Cureus, 15(9), Article e45043.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Atik, O. Ş. (2024). Artificial intelligence: Who must have autonomy the machine or the human? Joint Diseases and Related Surgery, 35(1), 1–2.
Ayik, G., Kolac, U. C., Aksoy, T., Yilmaz, A., Sili, M. V., Tokgozoglu, M., ... (2025). Exploring the role of artificial intelligence in Turkish orthopedic progression exams. Acta Orthopaedica et Traumatologica Turcica, 59(1), 18–26.</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Benli, İ., &amp; Acaroğlu, E. (2011). Türk Ortopedi ve Travmatoloji Birliği Derneği (TOTBİD) Türk Ortopedi ve Travmatoloji Eğitim Konseyi Yeterlik Sınavları. Acta Orthopaedica et Traumatologica Turcica, 45(2). https://dergipark.org.tr/en/download/article-file/169969</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Gönen, D. E. (2013). 2012-2013 TOTBİD-TOTEK Uzmanlık Eğitimi Gelişim Sınavı Raporu (UEGS). Türk Ortopedi ve Travmatoloji Birliği Derneği. https://totbid.org.tr/uploads/files/uegs_2013_rapor.pdf</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Haenlein, M., &amp; Kaplan, A. (2019). A brief history of artificial intelligence: On the past, present, and future of artificial intelligence. California Management Review, 61(4), 5–14.</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Huang, Y., Gomaa, A., Semrau, S., Haderlein, M., Lettmaier, S., Weissmann, T., ... (2023). Benchmarking ChatGPT-4 on a radiation oncology in-training exam and Red Journal Gray Zone cases: Potentials and challenges for AI-assisted medical education and decision making in radiation oncology. Frontiers in Oncology, 13, Article 1265024.</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Khan, R. A., Jawaid, M., Khan, A. R., &amp; Sajjad, M. (2023). ChatGPT - Reshaping medical education and clinical management. Pakistan Journal of Medical Sciences, 39(2). https://pjms.org.pk/index.php/pjms/article/view/7653</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Liu, P. R., Lu, L., Zhang, J. Y., Huo, T. T., Liu, S. X., &amp; Ye, Z. W. (2021). Application of artificial intelligence in medicine: An overview. Current Medical Science, 41(6), 1105–1115.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Massey, P. A., Montgomery, C., &amp; Zhang, A. S. (2023). Comparison of ChatGPT–3.5, ChatGPT-4, and orthopaedic resident performance on orthopaedic assessment examinations. Journal of the American Academy of Orthopaedic Surgeons, 31(23), 1173.
Minh, D., Wang, H. X., Li, Y. F., &amp; Nguyen, T. N. (2022). Explainable artificial intelligence: A comprehensive review. Artificial Intelligence Review, 55(5), 3503–3568.</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Moritz, S., Romeike, B., Stosch, C., &amp; Tolks, D. (2023). Generative AI (gAI) in medical education: Chat-GPT and co. GMS Journal for Medical Education, 40(4), Article Doc54.</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Ollivier, M., Pareek, A., Dahmen, J., Kayaalp, M. E., Winkler, P. W., Hirschmann, M. T., ... (2023). A deeper dive into ChatGPT: History, use and future perspectives for orthopaedic research. Knee Surgery, Sports Traumatology, Arthroscopy, 31(4), 1190–1192.</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Oztermeli, A. D., &amp; Oztermeli, A. (2023). ChatGPT performance in the medical specialty exam: An observational study. Medicine, 102(32), Article e34673.</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Ruksakulpiwat, S., Kumar, A., &amp; Ajibade, A. (2023). Using ChatGPT in medical research: Current status and future directions. Journal of Multidisciplinary Healthcare, 16, 1513–1520.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Sumbal, A., Sumbal, R., &amp; Amir, A. (2024). Can ChatGPT-3.5 pass a medical exam? A systematic review of ChatGPT’s performance in academic testing. Journal of Medical Education and Curricular Development, 11, Article 23821205241238641.</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Tabatabaian, M. (2024). Prompt engineering using ChatGPT: Crafting effective interactions and building GPT apps. Mercury Learning and Information.</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Türk Ortopedi ve Travmatoloji Birliği Derneği. (n.d.). TOTBİD resmi sitesi. https://totbid.org.tr/tr/</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Wang, X., Gong, Z., Wang, G., Jia, J., Xu, Y., Zhao, J., ... (2023). ChatGPT performs on the Chinese National Medical Licensing Examination. Journal of Medical Systems, 47(1), Article 86.</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Wu, D., Xiang, Y., Wu, X., Yu, T., Huang, X., Zou, Y., ... (2020). Artificial intelligence-tutoring problem-based learning in ophthalmology clerkship. Annals of Translational Medicine, 8(11), Article 700.</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Yang, Y. Y., &amp; Shulruf, B. (2019). Expert-led and artificial intelligence (AI) system-assisted tutoring course increase confidence of Chinese medical interns on suturing and ligature skills: Prospective pilot study. Journal of Educational Evaluation for Health Professions, 16, Article 7.</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Yağar, H., Gümüşoğlu, E., &amp; Mert Asfuroğlu, Z. (2025). Assessing the performance of ChatGPT-4o on the Turkish Orthopedics and Traumatology Board Examination. Joint Diseases and Related Surgery, 36(2), 304–310.</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
