<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article article-type="research-article" dtd-version="1.4" xml:lang="tr">
            <front>

                <journal-meta>
                                                                <journal-id journal-id-type="publisher-id">osmaniye korkut ata university journal of the institute of science and technology</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Osmaniye Korkut Ata Üniversitesi Fen Bilimleri Enstitüsü Dergisi</journal-title>
            </journal-title-group>
                            <issn pub-type="ppub">2687-3729</issn>
                                                                                                        <publisher>
                    <publisher-name>Osmaniye Korkut Ata Üniversitesi</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.47495/okufbed.1657375</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Deep Learning</subject>
                                                            <subject>Neural Networks</subject>
                                                            <subject>Machine Vision</subject>
                                                            <subject>Machine Learning (Other)</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Derin Öğrenme</subject>
                                                            <subject>Nöral Ağlar</subject>
                                                            <subject>Yapay Görme</subject>
                                                            <subject>Makine Öğrenme (Diğer)</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Görünüm ve Özellik Tabanlı Bir Bakış İzleme Modelinin Geliştirilmesi: TrGaze24</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="en">
                                    <trans-title>Development of an Appearance- and Feature-Based Gaze Tracking Model: TrGaze24</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-4033-0122</contrib-id>
                                                                <name>
                                    <surname>Yıldız</surname>
                                    <given-names>Adem Mehmet</given-names>
                                </name>
                                                                    <aff>KIRKLARELİ ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-0954-1263</contrib-id>
                                                                <name>
                                    <surname>Kırmacı</surname>
                                    <given-names>Ömer</given-names>
                                </name>
                                                                    <aff>KIRKLARELİ ÜNİVERSİTESİ</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-6935-346X</contrib-id>
                                                                <name>
                                    <surname>Uzun</surname>
                                    <given-names>Adem</given-names>
                                </name>
                                                                    <aff>Bursa Uludağ Üniversitesi</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-0881-8486</contrib-id>
                                                                <name>
                                    <surname>Demirbağ</surname>
                                    <given-names>Mehmet</given-names>
                                </name>
                                                                    <aff>Bursa Uludağ Üniversitesi</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="2026-01-14">
                    <day>14</day>
                    <month>01</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>9</volume>
                                        <issue>1</issue>
                                        <fpage>29</fpage>
                                        <lpage>54</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="2025-03-13">
                        <day>13</day>
                        <month>03</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="2025-07-06">
                        <day>06</day>
                        <month>07</month>
                        <year>2025</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2018, Osmaniye Korkut Ata University Journal of the Institute of Science and Technology</copyright-statement>
                    <copyright-year>2018</copyright-year>
                    <copyright-holder>Osmaniye Korkut Ata University Journal of the Institute of Science and Technology</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Bu çalışmada, çok modlu analitikler için bakış noktaları ile açılarını tahmin etmek amacıyla görünüm ve özellik tabanlı yöntemlere dayalı bir bakış izleme modeli geliştirilmiştir. İlk olarak bir göz veri seti oluşturulmuştur. Veri toplama sürecinde, 25 noktalı bir kalibrasyon tasarımı kullanılarak laboratuvar ortamında 128 katılımcıdan veri elde edilmiştir. TrGaze24 adını verdiğimiz veri setinden tahmin modeli oluşturmak için bir evrişimli sinir ağı (CNN) tasarlanmış ve yüz görüntülerinden çıkarılan sağ ve sol gözlere ait 18.410 göz görüntüsü, göz açıları ve yüz özellikleri modele girdi olarak sunulmuştur. Çıktılar, ekran üzerindeki hedef noktaların x ve y koordinatları ile bakış açısı vektörleri şeklinde belirlenmiştir. Modelin performansı için ortalama mutlak hatalar hesaplanmıştır, bakışa ait noktasal uzaklıkların tahminlerinde 3,74 cm ve açısal vektörlerin tahminlerinde 3,32 derece hata oranıyla literatürdeki benzer çalışmalara kıyasla oldukça başarılı bulunmuştur. Özellikle eğitim teknolojileri açısından, bilgisayar destekli eğitim ortamlarında web kamerası tabanlı bakış izleme sistemlerin temelini oluşturma potansiyeli taşımaktadır. Bu model, öğrenme analitiklerinde bilişsel süreçlerin incelenmesine olanak tanırken, aynı zamanda psikoloji, reklamcılık ve sosyal bilimler gibi alanlarda da kullanılabilirliğiyle dikkat çekmektedir.</p></abstract>
                                                                                                                                    <trans-abstract xml:lang="en">
                            <p>This study developed a gaze tracking model based on appearance- and feature-based methods to predict gaze points and angles within the scope of multimodal analytics. First, an eye dataset was created. During the data collection process, data were obtained from 128 participants in a laboratory setting using a 25-point calibration design. To create a prediction model from the dataset, which we named TrGaze24, a convolutional neural network (CNN) was designed. The model utilized 18,410 eye images (right and left eyes) extracted from facial data, along with gaze angles and facial features, as inputs. The outputs were defined as the x and y coordinates of target points on the screen and gaze direction vectors. The model&#039;s performance was evaluated by calculating mean absolute errors, with point-based gaze predictions showing an error of 3.74 cm and angular vector predictions an error of 3.32 degrees. These results demonstrated significant success compared to similar studies in the literature. Particularly in educational technologies, the model holds potential to form the foundation of webcam-based gaze tracking systems in computer-assisted learning environments. In addition to enabling the analysis of cognitive processes in learning analytics, the model also stands out for its applicability in fields such as psychology, advertising, and social sciences.</p></trans-abstract>
                                                            
            
                                                            <kwd-group xml:lang="tr">
                                                    <kwd>Bakış izleme modeli</kwd>
                                                    <kwd>Göz veri seti</kwd>
                                                    <kwd>Göz izleme</kwd>
                                                    <kwd>Evrişimli sinir ağı mimarisi</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="en">
                                                    <kwd>Gaze tracking model</kwd>
                                                    <kwd>Eye dataset</kwd>
                                                    <kwd>Eye tracking</kwd>
                                                    <kwd>Convolutional neural network architecture</kwd>
                                            </kwd-group>
                                                                                                            </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Akinyelu AA., Blignaut P. Convolutional neural network-based methods for eye gaze estimation: a survey. IEEE Access 2020; 8: 581-605. https://doi.org/10.1109/ACCESS.2020.3013540</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Altan T., Çağiltay K. An eye-tracking analysis of spatial contiguity effect in educational animations. Learning and Collaboration Technologies: Second International Conference 2023; 3-13, Los Angeles, CA, USA. https://doi.org/10.1007/978-3-319-20609-7_1</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Baluja S., Pomerleau D. Non-intrusive gaze tracking using artificial neural networks. Advances in Neural Information Processing Systems 1993; 753-760.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Bondareva D., Conati C., Feyzi-Behnagh R., Harley JM., Azevedo R., Bouchet F. Inferring learning from gaze data during interaction with an environment to support self-regulated learning. In: H. C. Lane, K. Yacef, J. Mostow, P. Pavlik (Ed.), Artificial intelligence in education. Springer 2013; 229-238.  https://doi.org/10.1007/978-3-642-39112-5_24</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Buswell GT. How people look at pictures: a study of the psychology and perception in art. Univ Chicago Press 1935.</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Chennamma HR., Yuan X. A survey on eye-gaze tracking techniques. arXiv preprint arXiv:1312.6410, 2013; https://doi.org/10.48550/arXiv.1312.6410</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Chong E., Ruiz N., Wang Y., Zhang Y., Rozga A., Rehg JM. Connecting gaze, scene, and attention: generalized attention estimation via joint modelling of gaze and scene saliency. In Proceedings of the European conference on Computer Vision 2018; 383-398. https://doi.org/10.1007/978-3-030-01228-1_24/FIGURES/7</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Crescenzi-Lanna L. Multimodal learning analytics research with young children: a systematic review. British Journal of Educational Technology 2020; 51(5): 1485-1504. https://doi.org/10.1111/BJET.12959</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Davis EK. dlib Library 2015; https://github.com/davisking/dlib. (Erişim tarihi: 11 Ekim 2024).</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Delabarre EB. A method of recording eye-movements. The American Journal of Psychology 1898; 9(4): 572-574. https://doi.org/10.2307/1412191</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Demjén E., Aboši V., Tomori Z. Eye tracking using artificial neural networks for human computer interaction. Physiological Research 2011; 60(5): 841-844. https://doi.org/10.33549/physiolres.932117</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Duchowski AT. Eye tracking methodology: Theory and practice. Springer International Publishing 2017.
Falch L., Lohan KS. Webcam-based gaze estimation for computer screen interaction. Frontiers in Robotics and AI 2024; 11. https://doi.org/10.3389/FROBT.2024.1369566/BIBTEX</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Fischer T., Chang HJ., Demiris Y. RT-GENE: Real-time eye gaze estimation in natural environments.  In: Proceedings of the European Conference on Computer Vision 2018; 334-352. https://openaccess.thecvf.com/content_ECCV_2018/html/Tobias_Fischer_RT-GENE_Real-Time_Eye_ECCV_2018_paper.html</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Frischen A., Bayliss AP., Tipper SP. Gaze cueing of attention: visual attention, social cognition, and individual differences. Psychological Bulletin 2007; 133(4): 694-724. https://doi.org/10.1037/0033-2909.133.4.694</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Ghosh S., Dhall A., Hayat M., Knibbe J., Ji Q. automatic gaze analysis: a survey of deep learning-based approaches. IEEE Transactions on Pattern Analysis and Machine Intelligence 2024; 46(1): 61-84. https://doi.org/10.1109/TPAMI.2023.3321337</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Goldberg JH., Stimson MJ., Lewenstein M., Scott N., Wichansky AM. Eye tracking in web search tasks: design implications. Proceedings of the 2002 Symposium on Eye Tracking Research &amp; Applications 2002; 51-58. https://doi.org/10.1145/507072.507082</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">google-ai-edge/mediapipe. GitHub- google-ai-edge/mediapipe: Cross-platform, customizable ml solutions for live and streaming media 2020; https://github.com/google-ai-edge/mediapipe. (Erişim tarihi: 11 Ekim 2024)</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Greene HH., Rayner K. Eye movements and familiarity effects in visual search. Vision Research 2001; 41(27): 3763-3773. https://doi.org/10.1016/S0042-6989(01)00154-7</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Grillon H., Françoise R., Bruno H., Daniel T. Use of virtual reality as therapeutic tool for behavioural exposure in the ambit of social. In International Conference Series on Disability, Virtual Reality and Associated Technologies 2006.</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Hansen DW., Ji Q.  In the eye of the beholder: a survey of models for eyes and gaze. IEEE Transactions on Pattern Analysis and Machine Intelligence 2010; 32(3): 478-500. https://doi.org/10.1109/TPAMI.2009.30</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Huang Q., Veeraraghavan A., Sabharwal A. TabletGaze: unconstrained appearance-based gaze estimation in mobile tablets. arXiv 2016; https://doi.org/10.48550/arXiv.1508.01244</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Huey EB. Preliminary experiments in the physiology and psychology of reading. The American Journal of Psychology 1898; 9(4): 575-586. https://doi.org/10.2307/1412192</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">Iannizzotto G., La Rosa F. Competitive combination of multiple eye detection and tracking techniques. IEEE Transactions on Industrial Electronics 2011; 58(8): 3151-3159. https://doi.org/10.1109/TIE.2010.2102314</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Jacob RJK. What you look at is what you get: eye movement-based interaction techniques. Proceedings of the SIGCHI Conference on Human Factors in Computing Systems 1990; 11-18. https://doi.org/10.1145/97243.97246</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Jongerius C., Callemein T., Goedemé T., Van Beeck K., Romijn JA., Smets EMA., Hillen MA. Eye-tracking glasses in face-to-face interactions: manual versus automated assessment of areas-of-interest. Behavior Research Methods 2021; 53(5): 2037-2048. https://doi.org/10.3758/s13428-021-01544-2</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Jabbarlı G., Kurt M. LightFFDNets: Lightweight convolutional neural networks for rapid facial forgery detection. arXiv 2024. https://doi.org/10.48550/arXiv.2411.11826</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Just MA., Carpenter PA. Eye fixations and cognitive processes. Cognitive Psychology 1976; 8(4): 441-480. https://doi.org/10.1016/0010-0285(76)90015-3</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Kazemi V., Sullivan J. One millisecond face alignment with an ensemble of regression trees. 2014 IEEE Conference on Computer Vision and Pattern Recognition 2014; 1867-1874. https://doi.org/10.1109/CVPR.2014.241</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Kellnhofer P., Recasens A., Stent S., Matusik W., Torralba A. Gaze360: Physically unconstrained gaze estimation in the wild. arXiv 2019; https://doi.org/10.48550/arXiv.1910.10088</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Krafka K., Khosla A., Kellnhofer P., Kannan H., Bhandarkar S., Matusik W., Torralba A. Eye tracking for everyone. IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2016; 2176-2184. https://doi.org/10.1109/CVPR.2016.239</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">Lian D., Zhang Z., Luo W., Hu L., Wu M., Li Z., Yu J., Gao S. RGBD based gaze estimation via multi-task cnn. Proceedings of the AAAI Conference on Artificial Intelligence 2019; 33(01): 2488-2495. https://doi.org/10.1609/AAAI.V33I01.33012488</mixed-citation>
                    </ref>
                                    <ref id="ref32">
                        <label>32</label>
                        <mixed-citation publication-type="journal">Liu J., Chi J., Yang H., Yin X. In the eye of the beholder: a survey of gaze tracking techniques. Pattern Recognition 2022; 132: 108944. https://doi.org/10.1016/j.patcog.2022.108944</mixed-citation>
                    </ref>
                                    <ref id="ref33">
                        <label>33</label>
                        <mixed-citation publication-type="journal">Lopes A., Ward AD., Cecchini M. Eye tracking in digital pathology: a comprehensive literature review. Journal of Pathology Informatics 2024; 15: 100383. https://doi.org/10.1016/J.JPI.2024.100383</mixed-citation>
                    </ref>
                                    <ref id="ref34">
                        <label>34</label>
                        <mixed-citation publication-type="journal">Lu F., Chen X., Sato Y. Appearance-based gaze estimation via uncalibrated gaze pattern recovery. IEEE Transactions on Image Processing 2017; 26(4): 1543-1553. https://doi.org/10.1109/TIP.2017.2657880</mixed-citation>
                    </ref>
                                    <ref id="ref35">
                        <label>35</label>
                        <mixed-citation publication-type="journal">Lu F., Okabe T., Sugano Y., Sato Y. A head pose-free approach for appearance-based gaze estimation. Procedings of the British Machine Vision Conference 2011; 126: 1-11. https://doi.org/10.5244/C.25.126</mixed-citation>
                    </ref>
                                    <ref id="ref36">
                        <label>36</label>
                        <mixed-citation publication-type="journal">Lu F., Okabe T., Sugano Y., Sato Y. Learning gaze biases with head motion for head pose-free gaze estimation. Image and Vision Computing 2014; 32(3): 169-179. https://doi.org/10.1016/j.imavis.2014.01.005</mixed-citation>
                    </ref>
                                    <ref id="ref37">
                        <label>37</label>
                        <mixed-citation publication-type="journal">Lu F., Sugano Y., Okabe T., Sato Y. Head pose-free appearance-based gaze sensing via eye image synthesis. Proceedings of the 21st International Conference on Pattern Recognition 2012; 1008-1011. https://ieeexplore.ieee.org/abstract/document/6460306</mixed-citation>
                    </ref>
                                    <ref id="ref38">
                        <label>38</label>
                        <mixed-citation publication-type="journal">Mahanama B., Jayawardana Y., Jayarathna S. Gaze-Net: Appearance-based gaze estimation using capsule networks.  In Proceedings of the 11th Augmented Human International Conference 2020; 1-14. https://doi.org/10.1145/3396339.3396393</mixed-citation>
                    </ref>
                                    <ref id="ref39">
                        <label>39</label>
                        <mixed-citation publication-type="journal">Majaranta P., Räihä KJ. Twenty years of eye typing: systems and design issues. In Proceedings of the 2002 Symposium on Eye Tracking Research &amp; Applications 2002; 15-22. https://doi.org/10.1145/507072.507076</mixed-citation>
                    </ref>
                                    <ref id="ref40">
                        <label>40</label>
                        <mixed-citation publication-type="journal">Mora KAF., Odobez JM. Gaze estimation from multimodal kinect data. IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops 2012; 25-30. https://doi.org/10.1109/CVPRW.2012.6239182
 
Orozco J., Roca FX., Gonzàlez J. Real-time gaze tracking with appearance-based models. Machine Vision and Applications 2009; 20(6): 353-364. https://doi.org/10.1007/s00138-008-0130-6</mixed-citation>
                    </ref>
                                    <ref id="ref41">
                        <label>41</label>
                        <mixed-citation publication-type="journal">Palmero C., Selva J., Bagheri MA., Escalera S. Recurrent CNN for 3D Gaze estimation using appearance and shape cues. British Machine Vision Conference 2018; https://arxiv.org/abs/1805.03064v3</mixed-citation>
                    </ref>
                                    <ref id="ref42">
                        <label>42</label>
                        <mixed-citation publication-type="journal">Park S., Aksan E., Zhang X., Hilliges O. Towards end-to-end video-based eye-tracking.  In: Computer Vision–ECCV 2020: 16th European Conference 2020; 747-763. Glasgow, UK.  https://doi.org/10.48550/arXiv.2007.13120</mixed-citation>
                    </ref>
                                    <ref id="ref43">
                        <label>43</label>
                        <mixed-citation publication-type="journal">Pathirana P., Senarath S., Meedeniya D., Jayarathna S. Eye gaze estimation: a survey on deep learning-based approaches. Expert Systems with Applications 2022; 199: 116894. https://doi.org/10.1016/J.ESWA.2022.116894</mixed-citation>
                    </ref>
                                    <ref id="ref44">
                        <label>44</label>
                        <mixed-citation publication-type="journal">Porta M., Ricotti S., Perez CJ. Emotional e-learning through eye tracking. Proceedings of the 2012 IEEE Global Engineering Education Conference (EDUCON) 2012; 1-6. https://doi.org/10.1109/EDUCON.2012.6201145</mixed-citation>
                    </ref>
                                    <ref id="ref45">
                        <label>45</label>
                        <mixed-citation publication-type="journal">Radach R., Hyona J., Deubel H. The mind’s eye: cognitive and applied aspects of eye movement research. Elsevier 2003.</mixed-citation>
                    </ref>
                                    <ref id="ref46">
                        <label>46</label>
                        <mixed-citation publication-type="journal">Rayner K. Eye movements in reading and information processing: 20 years of research. Psychological Bulletin 1998; 124(3): 372-422. https://doi.org/10.1037/0033-2909.124.3.372</mixed-citation>
                    </ref>
                                    <ref id="ref47">
                        <label>47</label>
                        <mixed-citation publication-type="journal">Rayner K., Rotello CM., Stewart AJ., Keir J., Duffy SA. Integrating text and pictorial information: eye movements when looking at print advertisements. Journal of Experimental Psychology: Applied 2001; 7(3):219-226. https://doi.org/10.1037/1076-898X.7.3.219</mixed-citation>
                    </ref>
                                    <ref id="ref48">
                        <label>48</label>
                        <mixed-citation publication-type="journal">Reale M., Hung T., Yin L. Pointing with the eyes: gaze estimation using a static/active camera system and 3d iris disk model. 2010 IEEE International Conference on Multimedia and Expo 2010; 280-285. https://doi.org/10.1109/ICME.2010.5583014</mixed-citation>
                    </ref>
                                    <ref id="ref49">
                        <label>49</label>
                        <mixed-citation publication-type="journal">Rikert TD., Jones J. Gaze estimation using morphable models. Proceedings Third IEEE International Conference on Automatic Face and Gesture Recognition 1998; 436-441. https://doi.org/10.1109/AFGR.1998.670987</mixed-citation>
                    </ref>
                                    <ref id="ref50">
                        <label>50</label>
                        <mixed-citation publication-type="journal">Sağlam Z., Yilmaz FGK. Eğitim araştırmalarında göz izleme: araştırmalardaki eğilimlerin belirlenmesi. Gazi Üniversitesi Gazi Eğitim Fakültesi Dergisi 2021; 41(3): 1621-1649. https://dergipark.org.tr/en/pub/gefad/issue/67470/892733</mixed-citation>
                    </ref>
                                    <ref id="ref51">
                        <label>51</label>
                        <mixed-citation publication-type="journal">Saxena S., Fink LK., Lange EB. Deep learning models for webcam eye tracking in online experiments. Behavior Research Methods 2023; 56: 3487–3503. https://doi.org/10.3758/s13428-023-02190-6</mixed-citation>
                    </ref>
                                    <ref id="ref52">
                        <label>52</label>
                        <mixed-citation publication-type="journal">Sewell W., Komogortsev O. Real-time eye gaze tracking with an unmodified commodity webcam employing a neural network. CHI ’10 Extended Abstracts on Human Factors in Computing Systems 2010; 3739-3744. https://doi.org/10.1145/1753846.1754048</mixed-citation>
                    </ref>
                                    <ref id="ref53">
                        <label>53</label>
                        <mixed-citation publication-type="journal">Sharma K., Giannakos M., Dillenbourg P. Eye-tracking and artificial intelligence to enhance motivation and learning. Smart Learning Environments 2020; 7(1): 13. https://doi.org/10.1186/s40561-020-00122-x</mixed-citation>
                    </ref>
                                    <ref id="ref54">
                        <label>54</label>
                        <mixed-citation publication-type="journal">Sigut J., Sidha SA. Iris center corneal reflection method for gaze tracking using visible light. IEEE Transactions on Biomedical Engineering 2011; 58(2): 411-419. https://doi.org/10.1109/TBME.2010.2087330</mixed-citation>
                    </ref>
                                    <ref id="ref55">
                        <label>55</label>
                        <mixed-citation publication-type="journal">Smith BA., Yin Q., Feiner SK., Nayar SK. Gaze locking: passive eye contact detection for human-object interaction. Proceedings of the 26th Annual ACM Symposium on User Interface Software and Technology 2013; 271-280. https://doi.org/10.1145/2501988.2501994</mixed-citation>
                    </ref>
                                    <ref id="ref56">
                        <label>56</label>
                        <mixed-citation publication-type="journal">Snodderly DM., Kagan I., Gur M. Selective activation of visual cortex neurons by fixational eye movements: implications for neural coding. Visual Neuroscience 2001; 18(2): 259-277. https://doi.org/10.1017/S0952523801182118</mixed-citation>
                    </ref>
                                    <ref id="ref57">
                        <label>57</label>
                        <mixed-citation publication-type="journal">Sugano Y., Matsushita Y., Sato Y. Appearance-based gaze estimation using visual saliency. IEEE Transactions on Pattern Analysis and Machine Intelligence 2013; 35(2): 329-341. https://doi.org/10.1109/TPAMI.2012.101</mixed-citation>
                    </ref>
                                    <ref id="ref58">
                        <label>58</label>
                        <mixed-citation publication-type="journal">Sugano Y., Matsushita Y., Sato Y. Learning-by-synthesis for appearance-based 3d gaze estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition 2014; 1821-1828.</mixed-citation>
                    </ref>
                                    <ref id="ref59">
                        <label>59</label>
                        <mixed-citation publication-type="journal">Taba IB. Improving eye-gaze tracking accuracy through personalized calibration of a user’s aspherical corneal model. University of British Columbia. PhD Thesis. 2012; 122. https://doi.org/10.14288/1.0072558</mixed-citation>
                    </ref>
                                    <ref id="ref60">
                        <label>60</label>
                        <mixed-citation publication-type="journal">Tan KH., Kriegman DJ., Ahuja N. Appearance-based eye gaze estimation. Sixth IEEE Workshop on Applications of Computer Vision 2002; 191-195. https://doi.org/10.1109/ACV.2002.1182180</mixed-citation>
                    </ref>
                                    <ref id="ref61">
                        <label>61</label>
                        <mixed-citation publication-type="journal">Tinker MA. Eye movements in reading. The Journal of Educational Research 1936; 30(4): 241-277. https://www.jstor.org/stable/27526226</mixed-citation>
                    </ref>
                                    <ref id="ref62">
                        <label>62</label>
                        <mixed-citation publication-type="journal">Torricelli D., Conforto S., Schmid M., D’Alessio T. A neural-based remote eye gaze tracker under natural head motion. Computer Methods and Programs in Biomedicine 2008; 92(1): 66-78. https://doi.org/10.1016/j.cmpb.2008.06.008</mixed-citation>
                    </ref>
                                    <ref id="ref63">
                        <label>63</label>
                        <mixed-citation publication-type="journal">Viola P., Jones MJ. Robust real-time face detection. International Journal of Computer Vision 2004; 57(2): 137-154. https://doi.org/10.1023/B:VISI.0000013087.49260.fb</mixed-citation>
                    </ref>
                                    <ref id="ref64">
                        <label>64</label>
                        <mixed-citation publication-type="journal">Xu H., Zhang J., Sun H., Qi M., Kong J. Analyzing students’ attention by gaze tracking and object detection in classroom teaching. Data Technologies and Applications 2023; 57(5): 643-667. https://doi.org/10.1108/DTA-09-2021-0236</mixed-citation>
                    </ref>
                                    <ref id="ref65">
                        <label>65</label>
                        <mixed-citation publication-type="journal">Xu X., Chen J., Li C., Fu C., Yang L., Yan Y., Lyu Z. Robust gaze point estimation for metaverse with common mode features suppression network. IEEE Transactions on Consumer Electronics 2024; 70(1): 2090-2098. https://doi.org/10.1109/TCE.2024.3351190</mixed-citation>
                    </ref>
                                    <ref id="ref66">
                        <label>66</label>
                        <mixed-citation publication-type="journal">Yang XH., Sun JD., Liu J., Li XC., Yang CX., Liu W. A remote gaze tracking system using gray-distribution-based video processing. Biomedical Engineering: Applications, Basis and Communications 2012; 24(03): 217-227. https://doi.org/10.4015/S1016237212500044</mixed-citation>
                    </ref>
                                    <ref id="ref67">
                        <label>67</label>
                        <mixed-citation publication-type="journal">Yecan E., Çağıltay K. Cognitive styles and students’ interaction with an instructional website: tracing users through eye-gaze. In Sixth IEEE International Conference on Advanced Learning Technologies 2006; 340-342. https://doi.org/10.1109/ICALT.2006.1652438</mixed-citation>
                    </ref>
                                    <ref id="ref68">
                        <label>68</label>
                        <mixed-citation publication-type="journal">Zhang X., Park S., Beeler T., Bradley D., Tang S., Hilliges O. ETH-XGaze: A large-scale dataset for gaze estimation under extreme head pose and gaze variation. arXiv 2020; 365-381. https://doi.org/10.48550/arXiv.2007.15837</mixed-citation>
                    </ref>
                                    <ref id="ref69">
                        <label>69</label>
                        <mixed-citation publication-type="journal">Zhang X., Sugano Y., Fritz M., Bulling A. Appearance-based gaze estimation in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition 2015; 4511-4520.</mixed-citation>
                    </ref>
                                    <ref id="ref70">
                        <label>70</label>
                        <mixed-citation publication-type="journal">Zhang X., Sugano Y., Fritz M., Bulling A. It’s written all over your face: full-face appearance-based gaze estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops 2017; 51-60.</mixed-citation>
                    </ref>
                                    <ref id="ref71">
                        <label>71</label>
                        <mixed-citation publication-type="journal">Zhang X., Sugano Y., Fritz M., Bulling A. MPIIGaze: real-world dataset and deep appearance-based gaze estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence 2019; 41(1): 162-175. https://doi.org/10.1109/TPAMI.2017.2778103</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
