<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article  article-type="research-article"        dtd-version="1.4">
            <front>

                <journal-meta>
                                                                <journal-id>chta</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Chaos Theory and Applications</journal-title>
            </journal-title-group>
                                        <issn pub-type="epub">2687-4539</issn>
                                                                                            <publisher>
                    <publisher-name>Akif AKGÜL</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.51537/chaos.1857261</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Applied Mathematics (Other)</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Uygulamalı Matematik (Diğer)</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                                                            <article-title>Self-Training the Neurochaos Learning Algorithm</article-title>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0009-0000-9385-001X</contrib-id>
                                                                <name>
                                    <surname>M</surname>
                                    <given-names>Anusree</given-names>
                                </name>
                                                                    <aff>Amrita Vishwa Vidyapeetham, Amritapuri Campus</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-0496-0362</contrib-id>
                                                                <name>
                                    <surname>Henry</surname>
                                    <given-names>Akhila</given-names>
                                </name>
                                                                    <aff>Amrita Vishwa Vidyapeetham, Amritapuri Campus</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-6142-5326</contrib-id>
                                                                <name>
                                    <surname>Nair</surname>
                                    <given-names>Pramod</given-names>
                                </name>
                                                                    <aff>Amrita Vishwa Vidyapeetham, Amritapuri Campus</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-03-28">
                    <day>28</day>
                    <month>03</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>8</volume>
                                        <issue>1</issue>
                                        <fpage>16</fpage>
                                        <lpage>23</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="20260106">
                        <day>01</day>
                        <month>06</month>
                        <year>2026</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="20260313">
                        <day>03</day>
                        <month>13</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2026, Chaos Theory and Applications</copyright-statement>
                    <copyright-year>2026</copyright-year>
                    <copyright-holder>Chaos Theory and Applications</copyright-holder>
                </permissions>
            
                                                                                                                        <abstract><p>In numerous practical applications, acquiring substantial quantities of labelled data is challenging and expensive, whereas unlabelled data is readily available. Conventional supervised learning methods frequently underperform when labelled data is scarce or classes are imbalanced. This study introduces a hybrid semi-supervised learning (SSL) architecture that integrates Neurochaos Learning (NL) with a threshold-based Self-Training (ST) method to overcome this constraint. The NL architecture converts input features into chaos-based firing-rate representations that capture nonlinear relationships within the data, while ST progressively enlarges the labelled set with high-confidence pseudo-labelled samples. Performance is assessed on ten benchmark datasets with five machine learning classifiers, treating 85% of the training data as unlabelled and only 15% as labelled. The proposed Self-Training Neurochaos Learning (NL+ST) architecture consistently achieves larger performance gains than standalone ST models, especially on small, nonlinear, and imbalanced datasets such as Wine (162.42%), Iris (121.34%), and Glass Identification (95.46%). The results indicate that combining chaos-based feature extraction with SSL improves generalisation, resilience, and classification accuracy in low-data settings.</p></abstract>
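            <!-- The abstract describes chaos-based firing-rate features feeding a
                 threshold-based self-training loop. Below is a minimal, hypothetical
                 Python sketch of that idea, assuming a skew-tent (GLS) neuron with
                 illustrative parameters (q, b, eps), a confidence threshold of 0.9,
                 and a random-forest base classifier; these settings are assumptions
                 for illustration, not the authors' reported configuration.

            import numpy as np
            from sklearn.ensemble import RandomForestClassifier

            # q, b, eps below are illustrative defaults, not values from the paper.
            def gls_firing_rate(x, q=0.34, b=0.499, eps=0.01, max_iter=10000):
                """Iterate a skew-tent (GLS) map from q until the trajectory enters
                the eps-neighbourhood of the stimulus x (in [0, 1]); return the
                fraction of iterates that landed in the 'active' region [b, 1]."""
                y, active, steps = q, 0, 0
                while abs(y - x) >= eps and steps < max_iter:
                    y = y / b if y < b else (1.0 - y) / (1.0 - b)
                    active += y >= b
                    steps += 1
                return active / max(steps, 1)

            def neurochaos_features(X):
                """Min-max normalise each column, then replace every value with the
                firing rate of one chaotic (GLS) neuron per input feature."""
                Xn = (X - X.min(0)) / (X.max(0) - X.min(0) + 1e-12)
                return np.vectorize(gls_firing_rate)(Xn)

            def self_train(X_lab, y_lab, X_unl, clf, threshold=0.9):
                """Threshold-based self-training: retrain on the labelled pool plus
                any unlabelled samples predicted above the confidence threshold,
                until no new pseudo-labels qualify."""
                while len(X_unl):
                    clf.fit(X_lab, y_lab)
                    proba = clf.predict_proba(X_unl)
                    keep = proba.max(axis=1) >= threshold
                    if not keep.any():
                        break  # no high-confidence pseudo-labels remain
                    pseudo = clf.classes_[proba[keep].argmax(axis=1)]
                    X_lab = np.vstack([X_lab, X_unl[keep]])
                    y_lab = np.concatenate([y_lab, pseudo])
                    X_unl = X_unl[~keep]
                return clf.fit(X_lab, y_lab)

            # Usage, assuming a 15% labelled / 85% unlabelled training split:
            # clf = self_train(neurochaos_features(X_lab), y_lab,
            #                  neurochaos_features(X_unl), RandomForestClassifier())
            -->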
                                                            
            
                                                                                        <kwd-group>
                                                    <kwd>Self-training</kwd>
                                                    <kwd>Neurochaos learning</kwd>
                                                    <kwd>Semi-supervised learning</kwd>
                                                    <kwd>Chaotic neural networks</kwd>
                                            </kwd-group>
                            
                                                                                                                                                    </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Amini, M.-R., V. Feofanov, L. Pauletto, L. Hadjadj, E. Devijver, et al.,
2025 Self-training: A survey. Neurocomputing 616: 128904.</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Anusree, M. and P. N. Pramod, 2025 Understanding chaotic neural
networks: A comprehensive review. Nonlinear Dynamics pp.
1–16.</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Anusree, M., P. Reshmi, J. Valadi, P. P. Nair, and P. Suravajhala,
2024 Hypothetical protein classification using neurochaos learning
architecture. In International Conference on Information and
Communication Technology for Competitive Strategies, pp. 337–346,
Springer.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">AS, R. A., N. B. Harikrishnan, and N. Nagaraj, 2023 Analysis of
logistic map based neurons in neurochaos learning architectures
for data classification. Chaos, Solitons and Fractals 170: 113347.</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">AS, R. A. and N. Nagaraj, 2025 Random heterogeneous neurochaos
learning architecture for data classification. Chaos Theory and
Applications 7: 10–30.</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Balakrishnan, H. N., A. Kathpalia, S. Saha, and N. Nagaraj, 2019
Chaosnet: A chaos based artificial neural network architecture
for classification. Chaos: An Interdisciplinary Journal of Nonlinear
Science 29.</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Bennett, K. and A. Demiriz, 1998 Semi-supervised support vector
machines. Advances in Neural Information processing systems
11.</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Blum, A. and T. Mitchell, 1998 Combining labeled and unlabeled
data with co-training. In Proceedings of the eleventh annual conference
on Computational learning theory, pp. 92–100, ACM.</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Deng, L. and X. Li, 2013 Machine learning paradigms for speech
recognition: An overview. IEEE Transactions on Audio, Speech,
and Language Processing 21: 1060–1089.</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Du, J., E. Grave, B. Gunel, V. Chaudhary, O. Celebi, et al., 2021
Self-training improves pre-training for natural language understanding.
In Proceedings of the 2021 Conference of the North American
chapter of the association for computational linguistics: human
language technologies, pp. 5408–5418.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Duarte, J. M. and L. Berton, 2023 A review of semi-supervised
learning for text classification. Artificial intelligence review 56:
9401–9469.</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Feofanov, V., E. Devijver, and M.-R. Amini, 2024 Multi-class probabilistic
bounds for majority vote classifiers with partially labeled
data. Journal of Machine Learning Research 25: 1–47.</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Ghiasi, G., B. Zoph, E. D. Cubuk, Q. V. Le, and T.-Y. Lin, 2021
Multi-task self-training for learning general representations. In
Proceedings of the IEEE/CVF International Conference on Computer
Vision, pp. 8856–8865.</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Harikrishnan, N. and N. Nagaraj, 2020 Neurochaos inspired hybrid
machine learning architecture for classification. In 2020
International Conference on Signal Processing and Communications
(SPCOM), pp. 1–5, IEEE.</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Harikrishnan, N., S. Pranay, and N. Nagaraj, 2022 Classification of
sars-cov-2 viral genome sequences using neurochaos learning.
Medical &amp; Biological Engineering &amp; Computing 60: 2245–2255.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Harikrishnan, N. B. and N. Nagaraj, 2021 When noise meets chaos:
Stochastic resonance in neurochaos learning. Neural Networks
143: 425–435.</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Henry, A. and N. Nagaraj, 2025a Augmented regression models
using neurochaos learning. Chaos, Solitons and Fractals 201:
117213.</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Henry, A. and N. Nagaraj, 2025b Neurochaos learning for classification
using composition of chaotic maps. Chaos Theory and
Applications 7: 107–116.</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Henry, A., R. Sundaravaradhan, and N. Nagaraj, 2025 Simplified
neurochaos learning architectures for data classification. Chaos:
An Interdisciplinary Journal of Nonlinear Science 35.</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Jin, C., T. F. Ng, and H. Ibrahim, 2025 Advancements in semisupervised
deep learning for brain tumor segmentation in mri:
A literature review. AI 6: 153.</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Kahn, J., A. Lee, and A. Hannun, 2020 Self-training for end-toend
speech recognition. In ICASSP 2020-2020 IEEE International
Conference on Acoustics, Speech and Signal Processing (ICASSP), pp.
7084–7088, IEEE.</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Mallapragada, P. K., R. Jin, A. K. Jain, and Y. Liu, 2008 Semiboost:
Boosting for semi-supervised learning. IEEE transactions on
pattern analysis and machine intelligence 31: 2000–2014.</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">NB, H., A. Kathpalia, and N. Nagaraj, 2022 Causality preserving
chaotic transformation and classification using neurochaos
learning. Advances in Neural Information Processing Systems
35: 2046–2058.</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Pant, K. K., N. Nagaraj, et al., 2025 Advancing forest fires classification
using neurochaos learning. arXiv preprint arXiv:2510.26383
.
Scudder, H., 1965 Adaptive communication receivers. IEEE Transactions
on Information Theory 11: 167–174.</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Sethi, D., N. Nagaraj, et al., 2023 Neurochaos feature transformation
for machine learning. Integration 90: 157–162.</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Silva, N. F. F. D., L. F. Coletta, and E. R. Hruschka, 2016 A survey
and comparative study of tweet sentiment analysis via semisupervised
learning. ACM Computing Surveys (CSUR) 49: 1–26.</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Sneha, K., A. Sudeesh, P. P. Nair, and P. Suravajhala, 2023 Biologically
inspired chaosnet architecture for hypothetical protein classification.
In 2023 Fifth International Conference on Electrical, Computer
and Communication Technologies (ICECCT), pp. 1–6, IEEE.</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Song, Z., X. Yang, Z. Xu, and I. King, 2022 Graph-based semisupervised
learning: A comprehensive review. IEEE Transactions
on Neural Networks and Learning Systems 34: 8174–8194.</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Tamposis, I. A., K. D. Tsirigos, M. C. Theodoropoulou, P. I. Kontou,
and P. G. Bagos, 2019 Semi-supervised learning of hidden
markov models for biological sequence analysis. Bioinformatics
35: 2208–2215.</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Tur, G., D. Hakkani-Tür, and R. E. Schapire, 2005 Combining active
and semi-supervised learning for spoken language understanding.
Speech Communication 45: 171–186.</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">Van Engelen, J. E. and H. H. Hoos, 2020 A survey on semisupervised
learning. Machine learning 109: 373–440.</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">Wang, Y., H. Chen, Q. Heng, W. Hou, Y. Fan, et al., 2022 Freematch:
Self-adaptive thresholding for semi-supervised learning. arXiv
preprint arXiv:2205.07246 .</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">Zhang, B., Y.Wang,W. Hou, H.Wu, J.Wang, et al., 2021 Flexmatch:
Boosting semi-supervised learning with curriculum pseudo labeling.
Advances in neural information processing systems 34:
18408–18419.</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">Zhou, Z.-H. and M. Li, 2005 Tri-training: Exploiting unlabeled
data using three classifiers. IEEE Transactions on knowledge
and Data Engineering 17: 1529–1541.</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">Zou, Y., Z. Yu, B. Kumar, and J. Wang, 2018 Unsupervised domain
adaptation for semantic segmentation via class-balanced selftraining.
In Proceedings of the European conference on computer
vision (ECCV), pp. 289–305.</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
