<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                                                <journal-id>techno-science</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Scientific Journal of Mehmet Akif Ersoy University</journal-title>
            </journal-title-group>
                                        <issn pub-type="epub">2651-3722</issn>
                                                                                            <publisher>
                    <publisher-name>Burdur Mehmet Akif Ersoy University</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.70030/sjmakeu.1879926</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Natural Language Processing</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Doğal Dil İşleme</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Towards Benchmarking Transformer Models for Biomedical Text Simplification</article-title>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-7727-0197</contrib-id>
                                                                <name>
                                    <surname>Mercan</surname>
                                    <given-names>Öykü Berfin</given-names>
                                </name>
                                                                    <aff>IZMIR KATIP CELEBI UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0003-1784-9003</contrib-id>
                                                                <name>
                                    <surname>Toçoğlu</surname>
                                    <given-names>Mansur Alp</given-names>
                                </name>
                                                                    <aff>IZMIR KATIP CELEBI UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-9012-4386</contrib-id>
                                                                <name>
                                    <surname>Turhan Turan</surname>
                                    <given-names>Nezihe</given-names>
                                </name>
                                                                    <aff>IZMIR KATIP CELEBI UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-9434-5880</contrib-id>
                                                                <name>
                                    <surname>Onan</surname>
                                    <given-names>Aytuğ</given-names>
                                </name>
                                                                    <aff>IZMIR INSTITUTE OF TECHNOLOGY</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-04-15">
                    <day>15</day>
                    <month>04</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>9</volume>
                                        <issue>1</issue>
                                        <fpage>13</fpage>
                                        <lpage>29</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2026-02-02">
                        <day>02</day>
                        <month>02</month>
                        <year>2026</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2026-03-11">
                        <day>11</day>
                        <month>03</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2018, Scientific Journal of Mehmet Akif Ersoy University</copyright-statement>
                    <copyright-year>2018</copyright-year>
                    <copyright-holder>Scientific Journal of Mehmet Akif Ersoy University</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Biomedical texts typically contain a high level of technical terminology and complex sentence structures, which limits their comprehensibility for readers without domain expertise. Text simplification, a natural language processing problem, aims to transform complex texts into a more readable and accessible form while preserving their original semantic content. Especially in biomedical texts, simplification can play an essential role in making scientific information understandable to patients and the general public. In this context, this study investigates the text simplification performance of pre-trained general-purpose and domain-specific language models (PLMs) for biomedical texts. The experiments utilize the Cochrane-Simplification dataset, which comprises technical abstracts from systematic reviews and their corresponding plain language summaries. General-purpose models and summarization tuned variants (BART-Large, BART-Large-CNN, BART-Large-XSum, PEGASUS-Large, PEGASUS-XSum, T5 and FLAN-T5) are compared alongside domain-specific models (BioBARTv2-Large, SciFive, Clinical-T5) under comparable fine-tuning settings. The models were compared using ROUGE, BLEU, BERTScore and SARI metrics to measure textual similarity and semantic coherence. The results indicate that BART based models achieve superior performance in the medical text simplification task.</p></abstract>
                                                            
            
                                                            <kwd-group>
                                                    <kwd>Text simplification</kwd>
                                                    <kwd>Pre-trained language models</kwd>
                                                    <kwd>Biomedical text</kwd>
                                                    <kwd>Cochrane-Simplification dataset</kwd>
                                            </kwd-group>
                            
                                                                                                                        </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Swanson, K., He, S., Calvano, J., Chen, D., Telvizian, T., Jiang, L., ... &amp; Lee, J. (2024). Biomedical text readability after hypernym substitution with fine-tuned large language models. PLOS Digital Health, 3(4), e0000489.</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Moramarco, F., Juric, D., Savkov, A., Flann, J., Lehl, M., Boda, K., ... &amp; Hammerla, N. (2022, February). Towards more patient friendly clinical notes through language models and ontologies. In AMIA Annual Symposium Proceedings (Vol. 2021, p. 881).</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Gotlieb, R., Praska, C., Hendrickson, M. A., Marmet, J., Charpentier, V., Hause, E., ... &amp; Pitt, M. B. (2022). Accuracy in patient understanding of common medical phrases. JAMA network open, 5(11), e2242972-e2242972.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Paulson, D., &amp; Hernandez, L. (2025). Evaluating Language Models for Simplifying Health Literacy Materials.</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Bakker, J., Papandreou-Lazos, T., &amp; Kamps, J. (2024, November). Biomedical text simplification models trained on aligned abstracts and lay summaries. In The Thirty-Third Text REtrieval Conference Proceedings (TREC 2024), Gaithersburg, MD, USA (Vol. 1329).</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Li, Z., Belkadi, S., Micheletti, N., Han, L., Shardlow, M., &amp; Nenadic, G. (2024, June). Investigating large language models and control mechanisms to improve text readability of biomedical abstracts. In 2024 IEEE 12th International Conference on Healthcare Informatics (ICHI) (pp. 265-274). IEEE.</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Cripwell, L., Legrand, J., &amp; Gardent, C. (2023, May). Document-level planning for text simplification. In 17th Conference of the European Chapter of the Association for Computational Linguistics (pp. 993-1006). Association for Computational Linguistics.</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Rahman, M. M., Irbaz, M. S., North, K., Williams, M. S., Zampieri, M., &amp; Lybarger, K. (2024). Health text simplification: An annotated corpus for digestive cancer education and novel strategies for reinforcement learning. Journal of Biomedical Informatics, 158, 104727.</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Shardlow, Matthew. (2014). A Survey of Automated Text Simplification. International Journal of Advanced Computer Science and Applications. 4. 10.14569/SpecialIssue.2014.040109.</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Vora, A., Chaudhari, T., Hotha, S., &amp; Sonawane, S. (2025). S-3 Pipeline for Biomedical Text Simplification.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., ... &amp; Polosukhin, I. (2017). Attention is all you need. Advances in neural information processing systems, 30.</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Celikten, T., &amp; Onan, A. (2025). Benchmarking Large Language Models for Biomedical Literature Summarization: Abstractive vs. Extractive Paradigms. IEEE Access.</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Xie, Q., Bishop, J. A., Tiwari, P., &amp; Ananiadou, S. (2022). Pre-trained language models with domain knowledge for biomedical extractive summarization. Knowledge-Based Systems, 252, 109460.</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Daraghmi, E., Atwe, L., &amp; Jaber, A. (2025). A comparative study of pegasus, bart, and t5 for text summarization across diverse datasets. Future Internet, 17(9), 389.</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Artsi, Y., Sorin, V., Konen, E., Glicksberg, B. S., Nadkarni, G., &amp; Klang, E. (2024). Large language models in simplifying radiological reports: systematic review. medRxiv, 2024-01.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Picton, B., Andalib, S., Spina, A., Camp, B., Solomon, S. S., Liang, J., ... &amp; Oh, M. Y. (2025). Assessing AI simplification of medical texts: readability and content fidelity. International Journal of Medical Informatics, 195, 105743.</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Devaraj, A., Marshall, I., Wallace, B. C., &amp; Li, J. J. (2021, June). Paragraph-level simplification of medical texts. In Proceedings of the 2021 conference of the North American chapter of the association for computational linguistics: human language technologies (pp. 4972-4984).</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Pattisapu, N., Prabhu, N., Bhati, S., &amp; Varma, V. (2020, July). Leveraging social media for medical text simplification. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (pp. 851-860).</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Makhmutova, L., Salton, G. D., Perez-Tellez, F., &amp; Ross, R. J. (2024). Automated Medical Text Simplification for Enhanced Patient Access. In BIOSTEC (2) (pp. 208-218).</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Ferreira, D. J. B., Almeida, T., &amp; Matos, S. (2025, July). A Framework for Fine-Grained Complexity Control in Health Answer Generation. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop) (pp. 1111-1131).</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Rahulprasath, S., Harshan, P., Kabilash, P. V., Lakshithraj, A., &amp; Sreemathy, J. (2025, June). AI in Healthcare: Simplifying Medical Reports for Enhanced Patient Comprehension. In 2025 International Conference on Emerging Technologies in Computing and Communication (ETCC) (pp. 1-6). IEEE.</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">GEM. (n.d.). cochrane-simplification [Data set]. Hugging Face. Retrieved November, 2025, from https://huggingface.co/datasets/GEM/cochrane-simplification</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">Yogarajan, V., Gouk, H., Smith, T., Mayo, M., &amp; Pfahringer, B. (2020, March). Comparing high dimensional word embeddings trained on medical text to bag-of-words for predicting medical codes. In Asian Conference on Intelligent Information and Database Systems (pp. 97-108). Cham: Springer International Publishing.</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., ... &amp; Zettlemoyer, L. (2020, July). BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th annual meeting of the association for computational linguistics (pp. 7871-7880).</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Zhang, J., Zhao, Y., Saleh, M., &amp; Liu, P. (2020, November). Pegasus: Pre-training with extracted gap-sentences for abstractive summarization. In International conference on machine learning (pp. 11328-11339). PMLR.</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., ... &amp; Liu, P. J. (2020). Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of machine learning research, 21(140), 1-67.</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Chung, H. W., Hou, L., Longpre, S., Zoph, B., Tay, Y., Fedus, W., ... &amp; Wei, J. (2024). Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70), 1-53.</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Yuan, H., Yuan, Z., Gan, R., Zhang, J., Xie, Y., &amp; Yu, S. (2022). BioBART: Pretraining and evaluation of a biomedical generative language model. arXiv preprint arXiv:2204.03905.</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Phan, L. N., Anibal, J. T., Tran, H., Chanana, S., Bahadroglu, E., Peltekian, A., &amp; Altan-Bonnet, G. (2021). Scifive: a text-to-text transformer model for biomedical literature. arXiv preprint arXiv:2106.03598.</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Lehman, E., &amp; Johnson, A. (2023). Clinical-t5: Large language models built using mimic clinical text. PhysioNet, 101, 215-220.</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">Goyal, T., Xu, J., Li, J. J., &amp; Durrett, G. (2022, May). Training dynamics for text summarization models. In Findings of the Association for Computational Linguistics: ACL 2022 (pp. 2061-2073).</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">Zhang, Y., Ni, A., Yu, T., Zhang, R., Zhu, C., Deb, B., ... &amp; Radev, D. (2021). An exploratory study on long dialogue summarization: What works and what's next. arXiv preprint arXiv:2109.04609.</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">Lin, C. Y. (2004, July). Rouge: A package for automatic evaluation of summaries. In Text summarization branches out (pp. 74-81).</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">Zaman, F., Kamiran, F., Shardlow, M., Hassan, S. U., Karim, A., &amp; Aljohani, N. R. (2024). SATS: simplification aware text summarization of scientific documents. Frontiers in Artificial Intelligence, 7, 1375419.</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">Papineni, K., Roukos, S., Ward, T., &amp; Zhu, W. J. (2002, July). Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics (pp. 311-318).</mixed-citation>
                    </ref>
                                    <ref id="ref36">
                        <label>36</label>
                        <mixed-citation publication-type="journal">Ormaechea, L., &amp; Tsourakis, N. (2024). Automatic text simplification for French: model fine-tuning for simplicity assessment and simpler text generation. International Journal of Speech Technology, 27(4), 957-976.</mixed-citation>
                    </ref>
                                    <ref id="ref37">
                        <label>37</label>
                        <mixed-citation publication-type="journal">Zhang, T., Kishore, V., Wu, F., Weinberger, K. Q., &amp; Artzi, Y. (2019). Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675.</mixed-citation>
                    </ref>
                                    <ref id="ref38">
                        <label>38</label>
                        <mixed-citation publication-type="journal">Xu, W., Napoles, C., Pavlick, E., Chen, Q., &amp; Callison-Burch, C. (2016). Optimizing statistical machine translation for text simplification. Transactions of the Association for Computational Linguistics, 4, 401-415.</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
