<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.4 20241031//EN"
        "https://jats.nlm.nih.gov/publishing/1.4/JATS-journalpublishing1-4.dtd">
<article article-type="research-article" dtd-version="1.4">
            <front>

                <journal-meta>
                                                                <journal-id>tjst</journal-id>
            <journal-title-group>
                                                                                    <journal-title>Turkish Journal of Science and Technology</journal-title>
            </journal-title-group>
                            <issn pub-type="ppub">1308-9080</issn>
                                        <issn pub-type="epub">1308-9099</issn>
                                                                                            <publisher>
                    <publisher-name>Fırat University</publisher-name>
                </publisher>
                    </journal-meta>
                <article-meta>
                                        <article-id pub-id-type="doi">10.55525/tjst.1777247</article-id>
                                                                <article-categories>
                                            <subj-group  xml:lang="en">
                                                            <subject>Pattern Recognition</subject>
                                                            <subject>Artificial Intelligence (Other)</subject>
                                                    </subj-group>
                                            <subj-group  xml:lang="tr">
                                                            <subject>Örüntü Tanıma</subject>
                                                            <subject>Yapay Zeka (Diğer)</subject>
                                                    </subj-group>
                                    </article-categories>
                                                                                                                                                        <title-group>
                                                                                                                        <article-title>Armed Activity Recognition using Pose-Based Features with Machine Learning and Deep Learning</article-title>
                                                                                                                                                                                                <trans-title-group xml:lang="tr">
                                    <trans-title>Makine Öğrenmesi ve Derin Öğrenme Kullanarak Duruş Tabanlı Özniteliklerle Silahlı Aktivite Tanıma</trans-title>
                                </trans-title-group>
                                                                                                    </title-group>
            
                                                    <contrib-group content-type="authors">
                                                                        <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-7883-3131</contrib-id>
                                                                <name>
                                    <surname>Altan</surname>
                                    <given-names>Gökhan</given-names>
                                </name>
                                                                    <aff>ISKENDERUN TECHNICAL UNIVERSITY</aff>
                                                            </contrib>
                                                    <contrib contrib-type="author">
                                                                    <contrib-id contrib-id-type="orcid">https://orcid.org/0009-0006-7510-4500</contrib-id>
                                                                <name>
                                    <surname>Daşcı</surname>
                                    <given-names>Ramazan</given-names>
                                </name>
                                                                    <aff>ISKENDERUN TECHNICAL UNIVERSITY, FACULTY OF ENGINEERING AND NATURAL SCIENCES, DEPARTMENT OF COMPUTER ENGINEERING</aff>
                                                            </contrib>
                                                                                </contrib-group>
                        
                                        <pub-date pub-type="pub" iso-8601-date="20260330">
                    <day>03</day>
                    <month>30</month>
                    <year>2026</year>
                </pub-date>
                                        <volume>21</volume>
                                        <issue>1</issue>
                                        <fpage>207</fpage>
                                        <lpage>218</lpage>
                        
                        <history>
                                    <date date-type="received" iso-8601-date="20250907">
                        <day>09</day>
                        <month>07</month>
                        <year>2025</year>
                    </date>
                                                    <date date-type="accepted" iso-8601-date="20260305">
                        <day>03</day>
                        <month>05</month>
                        <year>2026</year>
                    </date>
                            </history>
                                        <permissions>
                    <copyright-statement>Copyright © 2009, Turkish Journal of Science and Technology</copyright-statement>
                    <copyright-year>2009</copyright-year>
                    <copyright-holder>Turkish Journal of Science and Technology</copyright-holder>
                </permissions>
            
                                                                                                <abstract><p>Automatic detection of &quot;armed&quot; activities commonly focuses on detecting the weapons themselves, an approach limited by the variety of weapon shapes, types, and colours. This study aims to identify armed and unarmed activities with AI, using postural landmark features of the human body and without requiring the weapon to be visible. We conducted a comparative analysis of the postures extracted by the MediaPipe and YOLOv8-pose frameworks and of their impact on the performance of conventional machine learning and deep learning classifiers. Posture landmarks were extracted separately with both frameworks from a total of 3,866 images (1,934 armed and 1,932 unarmed activities). Conventional machine learning algorithms and Long Short-Term Memory (LSTM) models were trained without data augmentation, using GridSearchCV for hyperparameter tuning and an early stopping mechanism. The experimental results showed that YOLOv8-pose-based posture landmarks yield higher armed-pose classification performance than MediaPipe-based landmarks. The optimized SVM trained on the YOLOv8-pose feature set achieved the highest test accuracy of 94.2%, whereas the most successful deep learning model, the YOLOv8-pose-based Bi-LSTM, reached 93.9%. Consequently, the findings demonstrate that the choice of feature set can outweigh the model complexity of sequence learning algorithms in &quot;armed pose&quot; detection, even for a lightweight SVM.</p></abstract>
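                                                                                                <!-- A minimal, hedged sketch of the pipeline the abstract describes:
                                                                                                     YOLOv8-pose landmark extraction feeding a GridSearchCV-tuned SVM.
                                                                                                     It assumes the Ultralytics and scikit-learn APIs; the checkpoint name,
                                                                                                     the 17x2 keypoint flattening, and the armed_paths/unarmed_paths lists
                                                                                                     are illustrative assumptions, not the authors' exact code.

import numpy as np
from ultralytics import YOLO
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

pose = YOLO("yolov8n-pose.pt")  # any pretrained YOLOv8 pose checkpoint (assumed)

def pose_vector(image_path):
    """Flatten the first detected person's 17 normalized keypoints to shape (34,)."""
    res = pose(image_path, verbose=False)[0]
    if res.keypoints is None or res.keypoints.xyn.shape[0] == 0:
        return None  # no person detected in this image
    return res.keypoints.xyn[0].cpu().numpy().ravel()

feats, labels = [], []
for label, paths in ((1, armed_paths), (0, unarmed_paths)):  # hypothetical path lists
    for p in paths:
        v = pose_vector(p)
        if v is not None:
            feats.append(v)
            labels.append(label)

X, y = np.stack(feats), np.array(labels)
X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42)

grid = GridSearchCV(SVC(), {"C": [1, 10, 100], "kernel": ["rbf", "linear"],
                            "gamma": ["scale", 0.01]}, cv=5, n_jobs=2)
grid.fit(X_tr, y_tr)
print(grid.best_params_, grid.score(X_te, y_te))
                                                                                                -->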
                                                                                                                                    <trans-abstract xml:lang="tr">
                            <p>&quot;Silahlı&quot; faaliyetlerin otomatik belirlenmesi genellikle silahların tespitine odaklanır; ancak bu, silahların şekil, tür ve renk çeşitliliğine bağlı sınırlılıklara sahiptir. Bu çalışma, silah görünürlüğüne gerek duymadan, insan vücudunun duruş referans noktalarını kullanarak silahlı ve silahsız faaliyetlerin yapay zeka tabanlı tanımlanmasını amaçlamaktadır. MediaPipe ve YOLOv8-pose kütüphaneleri ile elde edilen duruşlar ve bunların geleneksel makine öğrenimi ve derin öğrenme sınıflandırıcılarının performansına etkileri üzerinde karşılaştırmalı bir analiz gerçekleştirdik. Duruş referans noktaları, toplam 3.866 görüntü (1.934 silahlı ve 1.932 silahsız faaliyet) üzerinde her iki kütüphane kullanılarak ayrı ayrı çıkarıldı. Geleneksel makine öğrenimi algoritmaları ve Uzun Kısa Süreli Bellek (LSTM) algoritmaları, GridSearchCV ve erken durdurma mekanizması kullanılarak veri artırma yapılmadan eğitildi. Deneysel sonuçlar, YOLOv8-pose tabanlı duruş özelliği referans noktalarının, MediaPipe tabanlı referans noktalarına göre daha yüksek silahlı duruş faaliyeti sınıflandırma performansı sağladığını göstermiştir. Optimize edilmiş SVM, YOLOv8-pose özellik kümesi üzerinde eğitilerek %94,2 ile en yüksek test doğruluk oranına ulaşırken, en başarılı derin öğrenme modeli olan YOLOv8-pose tabanlı Bi-LSTM, %93,9 doğruluk oranına ulaşmıştır. Sonuç olarak, bulgular, hafif bir SVM algoritması için bile &quot;silahlı poz&quot; tespitinde öznitelik kümesi seçiminin, dizi öğrenmeye dayalı algoritmaların model karmaşıklığından daha önemli olabileceğini göstermektedir.</p></trans-abstract>
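                            <!-- Likewise, a hedged Keras sketch of the Bi-LSTM baseline the abstract
                                 reports, treating the 17 keypoints as a length-17 sequence of (x, y)
                                 pairs and training with early stopping. Layer sizes, dropout rate, and
                                 patience are assumptions; X_tr, X_te, y_tr, y_te come from the sketch above.

from tensorflow.keras import callbacks, layers, models

model = models.Sequential([
    layers.Input(shape=(17, 2)),            # 17 keypoints, each an (x, y) pair
    layers.Bidirectional(layers.LSTM(64)),  # bidirectional sequence learning
    layers.Dropout(0.3),                    # regularization against overfitting
    layers.Dense(1, activation="sigmoid"),  # armed (1) vs. unarmed (0)
])
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
stop = callbacks.EarlyStopping(monitor="val_loss", patience=10,
                               restore_best_weights=True)
model.fit(X_tr.reshape(len(X_tr), 17, 2), y_tr, validation_split=0.1,
          epochs=200, callbacks=[stop], verbose=0)
print(model.evaluate(X_te.reshape(len(X_te), 17, 2), y_te, verbose=0))
                                 -->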
                                                            
            
                                                            <kwd-group>
                                                    <kwd>armed pose detection</kwd>
                                                    <kwd>deep learning</kwd>
                                                    <kwd>gesture recognition</kwd>
                                                    <kwd>machine learning</kwd>
                                            </kwd-group>
                                                        
                                                                            <kwd-group xml:lang="tr">
                                                    <kwd>Silahlı Poz Tespiti</kwd>
                                                    <kwd>  derin öğrenme</kwd>
                                                    <kwd>  duruş tanıma</kwd>
                                                    <kwd>  makine öğrenmesi</kwd>
                                            </kwd-group>
                                                                                                        <funding-group specific-use="FundRef">
                    <award-group>
                                                    <funding-source>
                                <named-content content-type="funder_name">Iskenderun Technical University</named-content>
                            </funding-source>
                                                                    </award-group>
                </funding-group>
                                </article-meta>
    </front>
    <back>
                            <ref-list>
                                    <ref id="ref1">
                        <label>1</label>
                        <mixed-citation publication-type="journal">Abruzzo B, Carey K, Lowrance C, Sturzinger E, Arnold R, Korpela C. Cascaded neural networks for identification and posture-based threat assessment of armed people. In:  IEEE 2019 Int Symp Technol Homel Secur (HST); 05-06 November 2019; Woburn, MA, USA: IEEE. pp.1-7.</mixed-citation>
                    </ref>
                                    <ref id="ref2">
                        <label>2</label>
                        <mixed-citation publication-type="journal">Salido J, Lomas V, Ruiz-Santaquiteria J, Deniz O. Automatic handgun detection with deep learning in video surveillance images. Appl Sci 2021;11(13):6085.</mixed-citation>
                    </ref>
                                    <ref id="ref3">
                        <label>3</label>
                        <mixed-citation publication-type="journal">Ruiz-Santaquiteria J, Velasco-Mata A, Vallez N, Bueno G, Alvarez-Garcia JA, Deniz O. Handgun detection using combined human pose and weapon appearance. IEEE Access 2021;9:123815-123826.</mixed-citation>
                    </ref>
                                    <ref id="ref4">
                        <label>4</label>
                        <mixed-citation publication-type="journal">Lamas A, Tabik S, Montes AC, Pérez-Hernández F, Garcia J, Olmos R, Herrera F. Human pose estimation for mitigating false negatives in weapon detection in video-surveillance. Neurocomputing 2022;489:488-503.</mixed-citation>
                    </ref>
                                    <ref id="ref5">
                        <label>5</label>
                        <mixed-citation publication-type="journal">Amado-Garfias AJ, Conant-Pablos SE, Ortiz-Bayliss JC, Terashima-Marín H. Improving armed people detection on video surveillance through heuristics and machine learning models. IEEE Access 2024;12:20543-20556.</mixed-citation>
                    </ref>
                                    <ref id="ref6">
                        <label>6</label>
                        <mixed-citation publication-type="journal">Singh H, Deniz O, Ruiz-Santaquiteria J, Muñoz JD, Bueno G. DeepGun: Deep feature-driven one-class classifier for firearm detection using visual gun features and human body pose estimation. Appl Sci 2025;15(11):5830.</mixed-citation>
                    </ref>
                                    <ref id="ref7">
                        <label>7</label>
                        <mixed-citation publication-type="journal">Ruiz-Santaquiteria J, Velasco-Mata A, Vallez N, Deniz O, Bueno G. Improving handgun detection through a combination of visual features and body pose-based data. Pattern Recognit 2023;136:109252.</mixed-citation>
                    </ref>
                                    <ref id="ref8">
                        <label>8</label>
                        <mixed-citation publication-type="journal">Hussain S, Siddiqui HUR, Saleem AA, Raza MA, Alemany-Iturriaga J, Velarde-Sotres Á, et al. Smart Physiotherapy: Advancing arm-based exercise classification with PoseNet and ensemble models. Sensors 2024;24(19):6325.</mixed-citation>
                    </ref>
                                    <ref id="ref9">
                        <label>9</label>
                        <mixed-citation publication-type="journal">Sarswat D. Real-time AI driven shooting posture assessment and correction for professional and military training using machine learning, OpenCV, and MediaPipe. In: 2024 4th Int Conf Technol Adv Comput Sci (ICTACS);13-15 November 2024; Tashkent, Uzbekistan. pp.1192-1198.</mixed-citation>
                    </ref>
                                    <ref id="ref10">
                        <label>10</label>
                        <mixed-citation publication-type="journal">Bhatt A, Ganatra A. Explosive weapons and arms detection with singular classification (WARDIC) on novel weapon dataset using deep learning: Enhanced OODA loop. Eng Sci 2022;20(3):252-266.</mixed-citation>
                    </ref>
                                    <ref id="ref11">
                        <label>11</label>
                        <mixed-citation publication-type="journal">Maligireddy AR, Uppula MR, Rastogi N, Parla YR. Gun detection using combined human pose and weapon appearance. arXiv preprint arXiv:2503.12215, 2025.</mixed-citation>
                    </ref>
                                    <ref id="ref12">
                        <label>12</label>
                        <mixed-citation publication-type="journal">Bhatt A, Ganatra A. Weapon operating pose detection and suspicious human activity classification using skeleton graphs. Math Biosci Eng 2023;20(2):2669-2690.</mixed-citation>
                    </ref>
                                    <ref id="ref13">
                        <label>13</label>
                        <mixed-citation publication-type="journal">Li Z, Song X, Chen S, Demachi K. Armed boundary sabotage: A case study of human malicious behaviors identification with computer vision and explainable reasoning methods. Comput Electr Eng 2025;121:109924.</mixed-citation>
                    </ref>
                                    <ref id="ref14">
                        <label>14</label>
                        <mixed-citation publication-type="journal">Widodo YB, Sibuea S, Agustino R. YOLO in Suspicious Human Activity Recognition for Intelligent Environmental Security Systems: A Review. Jurnal Teknologi Informatika dan Komputer, 2026:12(1): 101-119.</mixed-citation>
                    </ref>
                                    <ref id="ref15">
                        <label>15</label>
                        <mixed-citation publication-type="journal">Toshev A, Szegedy C. DeepPose: Human pose estimation via deep neural networks. Proc IEEE Conf Comput Vis Pattern Recognit (CVPR) 2014:1653-1660.</mixed-citation>
                    </ref>
                                    <ref id="ref16">
                        <label>16</label>
                        <mixed-citation publication-type="journal">Rajeti RS, Jyothirmayi T. Object detection using FRCNN with VGG-19. In AIP Conference Proceedings 2026:3345(1): 020257.</mixed-citation>
                    </ref>
                                    <ref id="ref17">
                        <label>17</label>
                        <mixed-citation publication-type="journal">Eneh A, Ochogwu RE, Ebem D. A Framework for Detection and Recognition of Armed Persons Using Convolutional Neural Networks. Scholar J Computational Science, 2025:2(2):64-72.</mixed-citation>
                    </ref>
                                    <ref id="ref18">
                        <label>18</label>
                        <mixed-citation publication-type="journal">Palani D, Kumar MV, Pushpakumari AK, Chowdary AN, Reddy IPK, Chidambaram V. Theft detection with computer vision technique using Yolo-V7 algorithm. AIP Conference Proceedings 2025;3175(1):020003.</mixed-citation>
                    </ref>
                                    <ref id="ref19">
                        <label>19</label>
                        <mixed-citation publication-type="journal">Dphi. Data Sprint 76: Human Action Recognition (HAR) Dataset [Internet]. Kaggle; 2023 [Accessed: Jan 23, 2026]. Available from: https://www.kaggle.com/datasets/meetnagadia/human-action-recognition-har-dataset</mixed-citation>
                    </ref>
                                    <ref id="ref20">
                        <label>20</label>
                        <mixed-citation publication-type="journal">Google. MediaPipe Pose Landmarker task [Internet]. Google for Developers; 2024 [Accessed: Jan 23, 2026]. Available from: https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker</mixed-citation>
                    </ref>
                                    <ref id="ref21">
                        <label>21</label>
                        <mixed-citation publication-type="journal">Ultralytics. Pose estimation with Ultralytics YOLOv8 [Internet]. Ultralytics Blog; 2024 [Accessed: Jan 23, 2026]. Available from: https://www.ultralytics.com/blog/pose-estimation-with-ultralytics-yolov8</mixed-citation>
                    </ref>
                                    <ref id="ref22">
                        <label>22</label>
                        <mixed-citation publication-type="journal">Dong C, Du G. An enhanced real-time human pose estimation method based on modified YOLOv8 framework. Sci Rep 2024;14(1):8012.</mixed-citation>
                    </ref>
                                    <ref id="ref23">
                        <label>23</label>
                        <mixed-citation publication-type="journal">Pedregosa F, Varoquaux G, Gramfort A, Michel V, Thirion B, Grisel O, et al. Scikit-learn: Machine learning in Python. J Mach Learn Res 2011;12:2825-2830.</mixed-citation>
                    </ref>
                                    <ref id="ref24">
                        <label>24</label>
                        <mixed-citation publication-type="journal">Moosaei H, Ketabchi S, Razzaghi M, Tanveer M. Generalized twin support vector machines. Neural Process Lett 2021;53(2):1545-1564.</mixed-citation>
                    </ref>
                                    <ref id="ref25">
                        <label>25</label>
                        <mixed-citation publication-type="journal">Rumelhart DE, Hinton GE, Williams RJ. Learning representations by back-propagating errors. Nature 1986;323(6088):533-536.</mixed-citation>
                    </ref>
                                    <ref id="ref26">
                        <label>26</label>
                        <mixed-citation publication-type="journal">Hochreiter S, Schmidhuber J. Long short-term memory. Neural Comput 1997;9(8):1735-1780.</mixed-citation>
                    </ref>
                                    <ref id="ref27">
                        <label>27</label>
                        <mixed-citation publication-type="journal">Srivastava N, Hinton G, Krizhevsky A, Sutskever I, Salakhutdinov R. Dropout: A simple way to prevent neural networks from overfitting. J Mach Learn Res 2014;15(1):1929-1958.</mixed-citation>
                    </ref>
                                    <ref id="ref28">
                        <label>28</label>
                        <mixed-citation publication-type="journal">Kingma DP, Ba J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.</mixed-citation>
                    </ref>
                                    <ref id="ref29">
                        <label>29</label>
                        <mixed-citation publication-type="journal">Bishop CM, Nasrabadi NM. Pattern Recognition and Machine Learning. New York, NY, USA: Springer, 2006.</mixed-citation>
                    </ref>
                                    <ref id="ref30">
                        <label>30</label>
                        <mixed-citation publication-type="journal">Cortes C, Vapnik V. Support-vector networks. Mach Learn 1995;20:273-297.</mixed-citation>
                    </ref>
                                    <ref id="ref31">
                        <label>31</label>
                        <mixed-citation publication-type="journal">Jo B, Kim S. Comparative analysis of OpenPose, PoseNet, and MoveNet models for pose estimation in mobile devices. Trait Signal 2022;39(1):119-126.</mixed-citation>
                    </ref>
                            </ref-list>
                    </back>
    </article>
