<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                    <journal-id></journal-id>
            <journal-title-group>
                                                                                    <journal-title>Politeknik Dergisi</journal-title>
            </journal-title-group>
                                        <issn pub-type="epub">2147-9429</issn>
                                                                                            <publisher>
                    <publisher-name>Gazi Üniversitesi</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.2339/politeknik.1601441</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Deep Learning</subject>
                                                            <subject>Artificial Intelligence (Other)</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Derin Öğrenme</subject>
                                                            <subject>Yapay Zeka (Diğer)</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>RLSE Aktivasyon Fonksiyonu Tasarımının Derin Sinir Ağlarının Performansındaki Etkisi</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="en">
                                    <trans-title>The Impact of RLSE Activation Function Design on the Performance of Deep Neural Networks</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-9469-9550</contrib-id>
                                                                <name>
                                    <surname>Özeloğlu</surname>
                                    <given-names>İsmihan Gül</given-names>
                                </name>
                                                                    <aff>GAZİ ÜNİVERSİTESİ, FEN BİLİMLERİ ENSTİTÜSÜ, ELEKTRİK-ELEKTRONİK MÜHENDİSLİĞİ (YL) (TEZLİ)</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-9887-3808</contrib-id>
                                                                <name>
                                    <surname>Akman Aydın</surname>
                                    <given-names>Eda</given-names>
                                </name>
                                                                    <aff>GAZİ ÜNİVERSİTESİ, TEKNOLOJİ FAKÜLTESİ, ELEKTRİK-ELEKTRONİK MÜHENDİSLİĞİ BÖLÜMÜ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-8762-5091</contrib-id>
                                                                <name>
                                    <surname>Barışçı</surname>
                                    <given-names>Necaattin</given-names>
                                </name>
                                                                    <aff>GAZİ ÜNİVERSİTESİ, TEKNOLOJİ FAKÜLTESİ, BİLGİSAYAR MÜHENDİSLİĞİ BÖLÜMÜ</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-04-21">
                    <day>21</day>
                    <month>04</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>29</volume>
                                        <issue>4</issue>
                                        <fpage>1</fpage>
                                        <lpage>9</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2025-01-01">
                        <day>01</day>
                        <month>01</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2025-09-26">
                        <day>26</day>
                        <month>09</month>
                        <year>2025</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 1998, Politeknik Dergisi</copyright-statement>
                    <copyright-year>1998</copyright-year>
                    <copyright-holder>Politeknik Dergisi</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Aktivasyon fonksiyonu derin sinir ağlarının performansı üzerinde kritik etkisi olan bir bileşendir. Bu çalışmada, derin sinir ağlarında, yüksek sınıflandırma doğruluğu ve düşük kayıp elde etmek için yeni bir aktivasyon fonksiyonu önerilmektedir. Önerilen RLSE (ReLu-LIP-Sigmoid-ELU kombinasyonu) aktivasyon fonksiyonu ile, kaybolan gradyan sorunu ve ölmekte olan ReLU probleminin üstesinden gelinmesi hedeflenmektedir. RLSE aktivasyon fonksiyonunun performansı MNIST ve Fashion-MNIST veri kümeleri üzerinde değerlendirilmiş ve literatürde bulunan yeni geliştirilmiş aktivasyon fonksiyonlarıyla karşılaştırılmıştır. RLSE aktivasyon fonksiyonunun kullanılması ile, bu çalışmada tasarlanan Evrişimsel Sinir Ağı (ESA) mimarisinde MNIST veri kümesi için %99,04 ve Fashion MNIST veri kümesi için %90,40 doğruluk oranları elde edilmiştir. Sonuçlar, RLSE aktivasyon fonksiyonunun diğer aktivasyon fonksiyonlarından daha iyi performans gösterdiğini ortaya koymaktadır.</p></abstract>
                                                                                                                                    <trans-abstract xml:lang="en">
                            <p>The activation function is a critical component that significantly impacts the performance of deep neural networks. In this study, a novel activation function, RLSE (a combination of ReLU-LIP-Sigmoid-ELU), is proposed to achieve high classification accuracy and low loss in deep neural networks. The RLSE activation function aims to address the vanishing gradient problem and the dying ReLU issue. The performance of the RLSE activation function has been evaluated on the MNIST and Fashion-MNIST datasets and compared with recently developed activation functions in the literature. Using the RLSE activation function, the Convolutional Neural Network (CNN) architecture designed in this study achieved accuracy rates of 99.04% for the MNIST dataset and 90.40% for the Fashion-MNIST dataset. The results demonstrate that the RLSE activation function outperforms other activation functions.</p></trans-abstract>
                                                            
            
                                                            <kwd-group xml:lang="tr">
                                                    <kwd>Aktivasyon fonksiyonu</kwd>
                                                    <kwd>Derin sinir ağları</kwd>
                                                    <kwd>Evrişimsel sinir ağı</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="en">
                                                    <kwd>Activation function</kwd>
                                                    <kwd>Deep neural networks</kwd>
                                                    <kwd>Convolutional neural network</kwd>
                                            </kwd-group>
                                                                                                            </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">[1]	Sengupta, S., Basak, S., Saikia, P., Paul, S., Tsalavoutis, V., Atiah, F., &amp; Peters, A., “A review of deep learning with special emphasis on architectures, applications and recent trends”, Knowledge-Based Systems, 194: 105596, (2020).</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">[2]	Sarker, I. H., “Machine learning: Algorithms, real-world applications and research directions”, SN computer science, 2(3): 160, (2021).</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">[3]	Cong, S., &amp; Zhou, Y., “A review of convolutional neural network architectures and their optimizations”, Artificial Intelligence Review, 56(3): 1905-1969, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">[4]	Zheng, Y., Gao, Z., Wang, Y., &amp; Fu, Q., “MOOC dropout prediction using FWTS-CNN model based on fused feature weighting and time series”, IEEE Access, 8: 225324-225335, (2020).</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">[5]	Dubey, S. R., Singh, S. K., &amp; Chaudhuri, B. B., “Activation functions in deep learning: A comprehensive survey and benchmark”, Neurocomputing, 503: 92-108, (2022).</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">[6]	Krichen, M., “Convolutional neural networks: A survey”, Computers, 12(8): 151, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">[7]	V. Nair, G.E. Hinton, Rectified linear units improve restricted boltzmann machines, in: Proc. International Conference on Machine Learning, Haifa, Israel, 807–814, (2010).</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">[8]	X. Glorot, A. Bordes, Y. Bengio, Deep sparse rectifier neural networks, in: Proc. International Conference on Artificial Intelligence and Statistics Conference, Ft. Lauderdale, FL, USA, (2011).</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">[9]	Apicella A., Donnarumma F., Isgrò F., Prevete R., “A survey on modern trainable activation functions”, Neural Netw., 138: 14-32, (2021).</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">[10]	Clevert D.-A., Unterthiner T., Hochreiter S., “Fast and accurate deep network learning by exponential linear units (ELUs)”, arXiv [cs.LG], (2015).</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">[11]	Klambauer G., Unterthiner T., Mayr A., Hochreiter S., “Self-normalizing neural networks”, arXiv [cs.LG], (2017).</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">[12]	Ramachandran, P., Zoph, B., &amp; Le, Q. V., “Swish: a self-gated activation function”, arXiv preprint arXiv:1710.05941, 7(1): 5, (2017).</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">[13]	Hendrycks, D., &amp; Gimpel, K., “Gaussian error linear units (gelus)”, arXiv preprint arXiv:1606.08415, (2016).</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">[14]	Lu, Lu, et al. &quot;Dying relu and initialization: Theory and numerical examples.&quot; arXiv preprint arXiv:1903.06733, (2019).</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">[15]	Elfwing, Stefan, Eiji Uchibe, and Kenji Doya., &quot;Sigmoid-weighted linear units for neural network function approximation in reinforcement learning&quot;, Neural networks, 107: 3-11, (2018).</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">[16]	Venkatappareddy, P., Culli, J., Srivastava, S., &amp; Lall, B., “A Legendre polynomial based activation function: An aid for modeling of max pooling”, Digital Signal Processing, 115: 103093, (2021).</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">[17]	Carini, Alberto, et al., &quot;Legendre nonlinear filters&quot;, Signal Processing, 109: 84-94, (2015).</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">[18]	Jahan, Israt, et al., &quot;Self-gated rectified linear unit for performance improvement of deep neural networks&quot;, ICT Express, 9(3): 320-325, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">[19]	Kiliçarslan, S., &amp; Celik, M., “RSigELU: A nonlinear activation function for deep neural networks”, Expert Systems with Applications, 174: 114805, (2021).</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">[20]	LeCun, Y., Bottou, L., Bengio, Y., &amp; Haffner, P., “Gradient-based learning applied to document recognition”, Proceedings of the IEEE, 86(11): 2278-2324, (1998).</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">[21]	Xiao, H., Rasul, K., &amp; Vollgraf, R., “Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms”, arXiv preprint arXiv:1708.07747, (2017).</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">[22]	Zhao, X., Wang, L., Zhang, Y., Han, X., Deveci, M., &amp; Parmar, M., “A review of convolutional neural networks in computer vision”, Artificial Intelligence Review, 57(4): 99, (2024).</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">[23]	Fan, C. L., “Multiscale Feature Extraction by Using Convolutional Neural Network: Extraction of Objects from Multiresolution Images of Urban Areas”, ISPRS International Journal of Geo-Information, 13(1): 5, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">[24]	Taye, M. M., “Theoretical understanding of convolutional neural network: Concepts, architectures, applications, future directions”, Computation, 11(3): 52, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">[25]	Zhao, L., &amp; Zhang, Z., “A improved pooling method for convolutional neural networks”, Scientific Reports, 14(1): 1589, (2024).</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">[26]	Elizar, E., Zulkifley, M. A., Muharar, R., Zaman, M. H. M., &amp; Mustaza, S. M., “A review on multiscale-deep-learning applications”, Sensors, 22(19): 7384, (2022).</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">[27]	Dubey, S. R., Singh, S. K., &amp; Chaudhuri, B. B., “Activation functions in deep learning: A comprehensive survey and benchmark”, Neurocomputing, 503: 92-108, (2022).</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">[28]	Özdemir, C., “Avg-topk: A new pooling method for convolutional neural networks”, Expert Systems with Applications, 223: 119892, (2023).</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">[29]	Zafar, A., Aamir, M., Mohd Nawi, N., Arshad, A., Riaz, S., Alruban, A., ... &amp; Almotairi, S., “A comparison of pooling methods for convolutional neural networks”, Applied Sciences, 12(17): 8643, (2022).</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">[30]	Jeczmionek, E., &amp; Kowalski, P. A., “Flattening layer pruning in convolutional neural networks”, Symmetry, 13(7): 1147, (2021).</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">[31]	Ullah, U., Jurado, A. G. O., Gonzalez, I. D., &amp; Garcia-Zapirain, B., “A fully connected quantum convolutional neural network for classifying ischemic cardiopathy”, IEEE Access, 10: 134592-134605, (2022).</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
