<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMIR</journal-id>
      <journal-id journal-id-type="nlm-ta">J Med Internet Res</journal-id>
      <journal-title>Journal of Medical Internet Research</journal-title>
      <issn pub-type="epub">1438-8871</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v26i1e56750</article-id>
      <article-id pub-id-type="pmid">39102676</article-id>
      <article-id pub-id-type="doi">10.2196/56750</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>An Effective Deep Learning Framework for Fall Detection: Model Development and Study Design</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Jin</surname>
            <given-names>Qiao</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Alam</surname>
            <given-names>Mohammad Arif Ul</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Qureshi</surname>
            <given-names>Shahzad Ahmad</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Okita</surname>
            <given-names>Shusuke</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Zhang</surname>
            <given-names>Jinxi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-8048-6703</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Zhen</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1432-7472</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Liu</surname>
            <given-names>Yu</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0006-7055-6524</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Jian</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0000-5175-3158</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Qiu</surname>
            <given-names>Hualong</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0002-0339-5354</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Mohan</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0009-4262-7593</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Hou</surname>
            <given-names>Guohui</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <xref rid="aff6" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7744-1064</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Zhou</surname>
            <given-names>Zhixiong</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Institute of Artificial Intelligence in Sports</institution>
            <institution>Capital University of Physical Education and Sports</institution>
            <addr-line>11 North Third Ring West Road</addr-line>
            <addr-line>Haidian District</addr-line>
            <addr-line>Beijing, 100191</addr-line>
            <country>China</country>
            <phone>86 13552505679</phone>
            <email>zhouzhixiong@cupes.edu.cn</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3369-1429</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Beijing Kupei Sports Culture Corporation Limited</institution>
        <addr-line>Beijing</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Institute of Artificial Intelligence in Sports</institution>
        <institution>Capital University of Physical Education and Sports</institution>
        <addr-line>Beijing</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>School of Physical Education and Sport Science</institution>
        <institution>Fujian Normal University</institution>
        <addr-line>Fuzhou</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Institute for Sport Performance and Health Promotion</institution>
        <institution>Capital University of Physical Education and Sports</institution>
        <addr-line>Beijing</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Bioelectronics Center of YZW</institution>
        <addr-line>Shanghai</addr-line>
        <country>China</country>
      </aff>
      <aff id="aff6">
        <label>6</label>
        <institution>Walt Technology Group Co, Ltd</institution>
        <addr-line>Jiaxing</addr-line>
        <country>China</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Zhixiong Zhou <email>zhouzhixiong@cupes.edu.cn</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>5</day>
        <month>8</month>
        <year>2024</year>
      </pub-date>
      <volume>26</volume>
      <elocation-id>e56750</elocation-id>
      <history>
        <date date-type="received">
          <day>25</day>
          <month>1</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>14</day>
          <month>5</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>7</day>
          <month>6</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>11</day>
          <month>6</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Jinxi Zhang, Zhen Li, Yu Liu, Jian Li, Hualong Qiu, Mohan Li, Guohui Hou, Zhixiong Zhou. Originally published in the Journal of Medical Internet Research (https://www.jmir.org), 05.08.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Journal of Medical Internet Research (ISSN 1438-8871), is properly cited. The complete bibliographic information, a link to the original publication on https://www.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://www.jmir.org/2024/1/e56750" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Fall detection is of great significance in safeguarding human health. By monitoring the motion data, a fall detection system (FDS) can detect a fall accident. Recently, wearable sensors–based FDSs have become the mainstream of research, which can be categorized into threshold-based FDSs using experience, machine learning–based FDSs using manual feature extraction, and deep learning (DL)–based FDSs using automatic feature extraction. However, most FDSs focus on the global information of sensor data, neglecting the fact that different segments of the data contribute variably to fall detection. This shortcoming makes it challenging for FDSs to accurately distinguish between similar human motion patterns of actual falls and fall-like actions, leading to a decrease in detection accuracy.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to develop and validate a DL framework to accurately detect falls using acceleration and gyroscope data from wearable sensors. We aim to explore the essential contributing features extracted from sensor data to distinguish falls from activities of daily life. The significance of this study lies in reforming the FDS by designing a weighted feature representation using DL methods to effectively differentiate between fall events and fall-like activities.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>Based on the 3-axis acceleration and gyroscope data, we proposed a new DL architecture, the dual-stream convolutional neural network self-attention (DSCS) model. Unlike previous studies, the used architecture can extract global feature information from acceleration and gyroscope data. Additionally, we incorporated a self-attention module to assign different weights to the original feature vector, enabling the model to learn the contribution effect of the sensor data and enhance classification accuracy. The proposed model was trained and tested on 2 public data sets: SisFall and MobiFall. In addition, 10 participants were recruited to carry out practical validation of the DSCS model. A total of 1700 trials were performed to test the generalization ability of the model.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The fall detection accuracy of the DSCS model was 99.32% (recall=99.15%; precision=98.58%) and 99.65% (recall=100%; precision=98.39%) on the test sets of SisFall and MobiFall, respectively. In the ablation experiment, we compared the DSCS model with state-of-the-art machine learning and DL models. On the SisFall data set, the DSCS model achieved the second-best accuracy; on the MobiFall data set, the DSCS model achieved the best accuracy, recall, and precision. In practical validation, the accuracy of the DSCS model was 96.41% (recall=95.12%; specificity=97.55%).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study demonstrates that the DSCS model can significantly improve the accuracy of fall detection on 2 publicly available data sets and performs robustly in practical validation.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>fall detection</kwd>
        <kwd>deep learning</kwd>
        <kwd>self-attention</kwd>
        <kwd>accelerometer</kwd>
        <kwd>gyroscope</kwd>
        <kwd>human health</kwd>
        <kwd>wearable sensors</kwd>
        <kwd>SisFall</kwd>
        <kwd>MobiFall</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>According to the World Health Organization, falls rank as the second leading cause of accidental injury-related deaths on a global scale [<xref ref-type="bibr" rid="ref1">1</xref>]. Statistics from the Centers for Disease Control and Prevention [<xref ref-type="bibr" rid="ref2">2</xref>] reveal that a significant proportion, at least a quarter, of US residents aged 65 years and older experience a fall annually. Among the older adult population, accidental falls have been the second leading cause of mortality and injury [<xref ref-type="bibr" rid="ref3">3</xref>]. To prevent falls from causing severe subsequent harm to individuals, it is essential to develop an accurate and efficient fall recognition system, to identify falls and raise the alarm [<xref ref-type="bibr" rid="ref4">4</xref>]. In the existing research, 2 main categories of systems have emerged: fall prediction systems [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>] and fall detection systems (FDSs) [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref21">21</xref>].</p>
      <p>Although fall prediction systems can assist users in proactively preventing potential falls, the associated installation and maintenance costs can be prohibitively high, thereby limiting its widespread application. In contrast, FDSs can promptly issue alerts upon detecting a fall event, ensuring swift assistance for the individual who has experienced the fall. In recent years, extensive research has been conducted on FDSs and related solutions, categorized as follows: (1) vision-based FDSs [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>], which monitor and analyze a person’s movements and postures using cameras or imaging devices to detect falls; (2) ambient device–based FDSs [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>], which use environmental sensors such as Wi-Fi or radar signals to track an individual’s movement data within their living space and detect falls; and (3) wearable sensors–based FDSs [<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref21">21</xref>], which use sensors attached to the body to monitor a user’s movements and postures to detect falls. Among these solutions, wearable sensor–based fall detection technology has garnered significant attention because of its affordability and nonintrusive characteristics [<xref ref-type="bibr" rid="ref21">21</xref>]. Wearable devices, such as inertial measurement units [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>], smartwatches [<xref ref-type="bibr" rid="ref15">15</xref>-<xref ref-type="bibr" rid="ref17">17</xref>], and smartphones [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>], use high-precision sensors to collect motion data seamlessly. These devices have been widely applied in fall detection and safety monitoring [<xref ref-type="bibr" rid="ref22">22</xref>].</p>
      <p>In wearable sensor–based FDSs, the design of fall detection algorithms is essential. Presently, algorithms used for fall detection can be classified into 3 categories: threshold-based models, machine learning–based models, and deep learning (DL) models. Threshold-based models and machine learning–based models necessitate the extraction of distinctive features from data sets containing fall incidents. In essence, it primarily entails capturing features such as the intensity of activities (eg, magnitude, energy) and variations in the intensity of activities (eg, frequency and SD) from the input data. Threshold-based models discern falls by comparing feature values against predefined thresholds, while machine learning models, such as k-nearest neighbors (KNN) [<xref ref-type="bibr" rid="ref23">23</xref>], support vector machines (SVM) [<xref ref-type="bibr" rid="ref24">24</xref>], and decision trees (DT) [<xref ref-type="bibr" rid="ref25">25</xref>], categorize falls and activities of daily life (ADL) based on the handcrafted features. In contrast, DL models, such as convolutional neural networks (CNN) [<xref ref-type="bibr" rid="ref26">26</xref>] and long short-term memory (LSTM) [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>], automatically extract high-level features and scrutinize the temporal characteristics of the data for fall detection.</p>
      <p>However, the aforementioned models solely use acceleration data as the input for their algorithms. A previous work [<xref ref-type="bibr" rid="ref29">29</xref>] has demonstrated that relying exclusively on acceleration data is inadequate for effectively distinguishing falls from ADL. This insufficiency is primarily due to the sensitivity of acceleration data to sensor placement and its inability to capture spatial rotation information. Consequently, researchers advocate for the integration of acceleration data with gyroscope data to provide a more comprehensive understanding of body movements, which can significantly enhance the accuracy of fall detection [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>].</p>
      <p>In this case, Hussain et al [<xref ref-type="bibr" rid="ref32">32</xref>], Son et al [<xref ref-type="bibr" rid="ref33">33</xref>], Liu et al [<xref ref-type="bibr" rid="ref34">34</xref>], and Koo et al [<xref ref-type="bibr" rid="ref35">35</xref>] use acceleration data with gyroscope data as the input to the model. Hussain et al [<xref ref-type="bibr" rid="ref32">32</xref>] and Son et al [<xref ref-type="bibr" rid="ref33">33</xref>] designed fall detection algorithms using KNN and SVM, both of which require manual feature extraction. Additionally, Liu et al [<xref ref-type="bibr" rid="ref34">34</xref>] developed a CNN-LSTM–based FDS. However, using CNN, LSTM, or their combination fails to evaluate the significance of each component of the feature vector, thereby hindering the differentiation between actual falls and fall-like activities. Moreover, Koo et al [<xref ref-type="bibr" rid="ref35">35</xref>] proposed a dual-stream–based algorithm for human activity recognition. While this model effectively differentiates between various daily activities, it struggles with the classification of falls and fall-like activities, which exhibit similar data trends.</p>
      <p>To address the above challenges, we incorporate the self-attention (SA) mechanism [<xref ref-type="bibr" rid="ref36">36</xref>] after the CNN module and propose a dual-stream CNN-SA (DSCS) model for fall detection. The SA mechanism has been widely applied in classification tasks such as sleep apnea [<xref ref-type="bibr" rid="ref37">37</xref>] and skeleton point–based human activity recognition [<xref ref-type="bibr" rid="ref38">38</xref>]. However, it has not been applied in FDSs. The SA mechanism empowers FDS models to allocate varying weights to different segments of the input data or extracted features.</p>
      <p>Compared with existing methods that use manually generated features, the proposed method can automatically extract features using a dual-stream architecture. Specifically, the DSCS model uses acceleration data along with gyroscope data as the input, and then the 2-stream data will pass through a 3-layer CNN to extract discriminable features. Unlike the models using CNN, LSTM, or their combinations, CNN-SA excels at effectively capturing long-term dependencies within input data, enabling the assignment of diverse weights to features from different phases of the fall process. This, in turn, aids the model in achieving an enhanced understanding of the context and pertinent information associated with fall behavior, thereby elevating the model’s capability for contextual modeling.</p>
      <p>In this paper, we introduce an accurate and embeddable DSCS model for fall detection. Considering that the handcrafted features rely on expert knowledge and may not yield satisfactory generalization performance, we design a DL framework. In this framework, both accelerometer and gyroscope data are used as input for the model. Then, we introduce a feature-generating method grounded in the CNN-SA architecture. Here, CNN is used to extract features from dual-stream data and capture spatial patterns, to preliminarily identify the pattern differences between fall and ADLs. The feature vector extracted by CNN subsequently passes through an SA layer, which assigns weights to accelerometer and gyroscope features. Finally, the predicted label (fall or ADL) is output by the classification module.</p>
      <p>The main contributions of this study are as follows: first, we introduce an accurate and embeddable dual-stream model, DSCS, for fall detection using wearable sensors. The DSCS model comprises a feature extraction module, an SA module, and a classification module. Second, the SA mechanism is applied in the task of fall detection, which assigns different weights to the feature vectors, learning the contribution effect of the sensor data, thereby effectively enhancing classification accuracy. Third, we validate the performance of the DSCS model using publicly available data sets and practical validation. Our model outperforms state-of-the-art machine learning and DL models and demonstrates excellent generalization performance.</p>
      <p>The rest of this paper is organized as follows: the <italic>Methods</italic> section illustrates the proposed DSCS framework. The <italic>Results</italic> section presents the performance comparison with state-of-the-art algorithms and practical validation. The <italic>Discussion</italic> section covers the results, performance gap, limitations of this study, and conclusions.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Data Sets for Fall Detection</title>
        <sec>
          <title>Overview</title>
          <p>In this paper, we validated the performance of the DSCS model using 2 publicly available data sets: SisFall [<xref ref-type="bibr" rid="ref39">39</xref>] and MobiFall [<xref ref-type="bibr" rid="ref40">40</xref>]. The 2 data sets consist of the accelerometer and gyroscope data recorded during fall and ADL trials of multiple participants.</p>
        </sec>
        <sec>
          <title>SisFall Data Set</title>
          <p>The SisFall data set collects accelerometer and gyroscope data during human motion, with a sampling frequency of 200 Hz. This data set comprises data on falls and ADLs collected from 38 volunteers, including 23 young individuals (younger than 30 years) and 15 older individuals (60 years and older). It encompasses 15 falls (such as fall forward, fall backward, and lateral fall) and 19 ADLs (eg, walking, jogging, and jumping).</p>
        </sec>
        <sec>
          <title>MobiFall Data Set</title>
          <p>The MobiFall data set collects accelerometer, gyroscope, and orientation data from smartphones’ built-in inertial measurement units during human motion. The accelerometer has a sampling frequency of 87 Hz, while the gyroscope and orientation sensor have a sampling frequency of 100 Hz. This data set is contributed by 24 volunteers and contains 4 distinct fall activities: fall forward, fall forward with knees on the ground, fall lateral, and fall backward onto a chair. Additionally, it contains 9 various daily activities (eg, standing, walking, and jogging).</p>
        </sec>
        <sec>
          <title>Data Processing</title>
          <p>The processing of accelerometer and gyroscope data consisted of 2 steps: data filtering and data segmentation.</p>
          <list list-type="bullet">
            <list-item>
              <p>Data filtering: Considering the susceptibility of sensor data to external environmental noise when worn on the body, we implemented the filtering on the accelerometer and gyroscope data. The filtering process enhances the stability and accuracy of the resulting signals. Since human movements often occur at a frequency of around 20 Hz, a low-pass filter with a cutoff frequency of 20 Hz was used to filter the data [<xref ref-type="bibr" rid="ref41">41</xref>].</p>
            </list-item>
            <list-item>
              <p>Data segmentation: A complete fall event typically lasts 8-12 seconds, encompassing 4 phases: prefall, falling, impact, and postfall. To match the sample length in the SisFall and MobiFall data sets, we used the time windows of 12 and 8 seconds for the data segmentation of the SisFall and MobiFall data sets, respectively. Then, we further reduced the sampling rate to 50 Hz to alleviate computational complexity.</p>
            </list-item>
          </list>
        </sec>
      </sec>
      <sec>
        <title>DL Framework</title>
        <sec>
          <title>Overview</title>
          <p>After data processing, we obtained 1798 fall segments and 4117 ADL segments from the SisFall data set and 288 fall segments and 1137 ADL segments from the MobiFall data set. As input to the model, each segment from the SisFall data set has a length of 12 seconds (600 sampling points), while each segment from the MobiFall data set has a length of 8 seconds (400 sampling points). After passing through a DL architecture, each segment produces a predicted label (fall or ADL).</p>
        </sec>
        <sec>
          <title>Network Architecture</title>
          <p>The deep neural network architecture proposed in this paper is illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>, which mainly consists of 3 modules: feature extraction module, SA module, and classification module.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>The architecture of the proposed dual-stream convolutional neural network self-attention model. ADL: activities of daily life; BN: batch normalization; BS: batch size; FC: fully connected; LoS: length of segment.</p>
            </caption>
            <graphic xlink:href="jmir_v26i1e56750_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Feature Extraction Module</title>
          <p>To extract features, we designed the same architecture for acceleration and gyroscope data stream: a 3-layer CNN to transform the 3-axis data into feature representations. The size of the input data is <italic>N × LoS × 3</italic>, where <italic>N</italic> is the batch size and <italic>LoS</italic> is the length of a segment. The first part of the 3-layer CNN comprises a 1D convolution with 64 filters, a kernel size of 3, and a stride of 1. The outputs of the first 2 layers are activated using a rectified linear unit (ReLU) and followed by a max-pooling layer with a pooling size of 2. The output of the third convolution layer is directly fed into a global pooling layer, which generates a 64D global feature vector for each element in the batch.</p>
        </sec>
        <sec>
          <title>SA Module</title>
          <p>The extracted feature representations from the acceleration and gyroscope data are further refined using the SA module. This module enables the acceleration or gyroscope stream network to efficiently discover the local information highly relevant to discriminating falls and ADL, by capturing the attention weights of local feature information. Specifically, during each training iteration, a total of <italic>N</italic> vectors is fed into the SA module. Within the SA module, these vectors are processed using query, key, and value matrices, each with dimensions of 64×64. Ultimately, a weighted feature vector is generated for each sample in the batch.</p>
        </sec>
        <sec>
          <title>Classification Module</title>
          <p>The weighted acceleration or gyroscope vectors, which have been processed by the SA module, are concatenated to a 128D vector and fed into the classification module. This module is responsible for mapping the concatenated vector to the corresponding prediction label. Specifically, the 128D vector is first processed through a 128D batch normalization (BN) layer, followed by a dropout layer with a dropout probability of 0.5. Finally, the vector is input into a fully connected layer, which comprises a 256D hidden layer and a softmax output layer.</p>
          <p>Next, we will provide an in-depth illustration of the architecture and functionality of the 3 modules.</p>
        </sec>
        <sec>
          <title>Feature Extraction Module</title>
          <p>Following Koo et al [<xref ref-type="bibr" rid="ref35">35</xref>], we design a feature extraction module for accelerometer and gyroscope data streams. This module comprises 3 consecutive encoders, with each encoder combining CNNs and pooling layers.</p>
          <p>For acceleration or gyroscope stream, the input data are initially processed by the first encoder, where a 1D CNN extracts shallow-level feature representations from the input data. Subsequently, a max-pooling layer is applied to reduce data dimensions and emphasize key features.</p>
          <p>In the second encoder, a CNN further extracts features in a local context, enabling the capture of more intricate patterns and relationships within the time series data. Similarly, a max-pooling layer is used to reduce feature map dimensions and highlight critical features.</p>
          <p>The third encoder is dedicated to global feature abstraction, allowing it to capture overarching patterns and essential information across the entire time series, rather than only local features. A global pooling layer enables pooling across the entire feature map, producing a global feature vector that encapsulates summarized information from the entire data sequence.</p>
          <p>Through the hierarchical processing by the 3 encoders, the DSCS model can extract both local and global features from the original 3D accelerometer or gyroscope data, thereby enhancing the model’s prediction performance and generalization capabilities.</p>
        </sec>
        <sec>
          <title>SA Module</title>
          <p>The feature vectors generated by the feature extraction module pass through an SA module. This module uses an attention mechanism to dynamically adjust the weights across different positions of the 1D feature vectors, capturing the interdependencies and correlations between different positions.</p>
          <p>The detailed process of generating a weighted feature vector is illustrated in <xref rid="figure2" ref-type="fig">Figure 2</xref>. Specifically, the SA module constructs query, key, and value vectors by multiplying the input feature vectors with respective weight matrices. The module calculates the dot product between the query and key vectors and then applies the softmax function to determine the relationships between the feature values at each position and those at other positions. The result of the dot product represents the strength of their relationships, commonly referred to as “attention”. Finally, the weighted feature representations for each position are obtained by applying the corresponding attention weights to the value vectors. In the SA mechanism, this weighting operation helps describe the correlations and associations between each position and the others, enabling the model to focus on different positions within the information.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>The process of generating weighted feature vectors from original feature vectors.</p>
            </caption>
            <graphic xlink:href="jmir_v26i1e56750_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Classification Module</title>
          <p>The classification module is used to transform the weighted feature vectors received as input into the final prediction labels. Specifically, each vector first passes through the BN module. The main function of the BN module is to normalize input data, accelerate model training, and improve the model’s adaptability to different features. Next, the dropout layer is deployed, which randomly sets the output of a portion of neurons to 0, to reduce the risk of overfitting and improve the model’s generalization ability. After BN and dropout, the feature vectors pass through a fully connected layer. Finally, the output of the fully connected layer passes through the softmax layer and generates the probability distribution of fall or ADL labels. The final predicted label generated by the model corresponds to the category with the highest probability.</p>
        </sec>
        <sec>
          <title>Training Parameters</title>
          <p>For each data set, we divided the participants in the data set into training and testing groups with ratios of 0.8 and 0.2, respectively. The proposed model was deployed in Python (version 3.8.10; Python Software Foundation) using the DL library of <italic>PyTorch</italic> (version 1.7.0). The training process was carried out on a computer server equipped with an Intel Xeon Gold 6330 CPU at 2.00 GHz and an NVIDIA GeForce RTX 3090. We used the cross-entropy loss function and the Adam optimizer for optimizing the network parameters. In the training phase, we used a maximum of 300 iterations with an initial learning rate set at 0.001, gradually decreasing as the training progressed. The batch size was configured at 128 for optimal model refinement. The hyperparameter selection method for training the model was random search. Parameters were randomly selected from the hyperparameter space, such as batch size values of 32, 64, and 128, and learning rate values of 0.001, 0.005, and 0.01. By exploring various combinations of batch sizes and learning rates, the optimal hyperparameters were identified based on the performance metrics. This approach ensured that the most effective combination was chosen to achieve the best model performance.</p>
        </sec>
      </sec>
      <sec>
        <title>Performance Evaluation Metrics</title>
        <p>To evaluate the fall detection performance of the proposed DSCS model, we used accuracy, recall, precision, and <italic>F</italic><sub>1</sub>-score as the evaluation metrics. Accuracy is the ratio of the number of segments that are correctly predicted (including falls and ADL) to the total number of segments, which is used to measure the overall performance of the model. Recall is the ratio of the number of correctly predicted fall segments to the total number of fall segments, and high recall means that the model has a strong ability to detect falls. Precision is the ratio of the number of correctly predicted fall segments to the total number of predicted fall segments. High precision means that the model can reduce the number of false alarms and accurately distinguish falls from fall-like activities. <italic>F</italic><sub>1</sub>-score is calculated based on recall and precision: <italic>F<sub>1</sub>-score = 2 × precision × recall / (precision + recall)</italic>. In practical validation, we also used specificity to evaluate the model’s performance in accurately distinguishing falls from ADLs, including fall-like activities. Specificity is defined as the ratio of correctly predicted ADL segments to the total number of ADL segments.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>The studies involving human participants were reviewed and approved by the Ethics Committee of the Capital University of Physical Education and Sports, Beijing, China (approval number 2023A036). The participants provided their written informed consent to participate in this study.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Detection Performance</title>
        <p>The performance of the proposed DSCS model was evaluated using the publicly available SisFall and MobiFall data sets. The model demonstrated robust performance on both data sets, achieving an accuracy of 99.32% (recall=99.15%; precision=98.58%; <italic>F</italic><sub>1</sub>-score=98.86%) on the testing sets of SisFall and an accuracy of 99.65% (recall=100%; precision=98.39%; <italic>F</italic><sub>1</sub>-score=99.19%) on the testing sets of MobiFall.</p>
        <p><xref rid="figure3" ref-type="fig">Figure 3</xref> illustrates the confusion matrices for the DSCS model across 2 test sets: SisFall and MobiFall. Notably, out of 1183 test samples from the SisFall data set, only 8 samples were misclassified (5 false positives and 3 false negatives). Similarly, among the 285 test samples from the MobiFall data set, only 1 sample was misclassified (1 false positive). The result shows that the model can achieve high classification accuracy on both data sets.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Detection performance of the proposed DSCS model: (A) confusion matrix of SisFall test set and (B) confusion matrix of MobiFall test set. ADL: activities of daily life; DSCS: dual-stream convolutional neural network self-attention.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e56750_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Cross-Validation</title>
        <p>To further validate the generalization ability and reliability of the proposed model, we conducted cross-validation to thoroughly assess its performance. The results in <xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref> indicate that the proposed model maintains stable performance on both the SisFall and MobiFall data sets. Specifically, the average <italic>F</italic><sub>1</sub>-scores for 5-fold cross-validation were 99.09% and 98.69%, respectively, while for 10-fold cross-validation, the average <italic>F</italic><sub>1</sub>-scores were 99.03% and 98.67%, respectively.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p><italic>F</italic><sub>1</sub>-score of the proposed dual-stream convolutional neural network self-attention model under 5-fold cross-validation.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="150"/>
            <col width="150"/>
            <col width="140"/>
            <col width="140"/>
            <col width="140"/>
            <col width="140"/>
            <col width="140"/>
            <thead>
              <tr valign="top">
                <td>Data set</td>
                <td>Fold 1 (%)</td>
                <td>Fold 2 (%)</td>
                <td>Fold 3 (%)</td>
                <td>Fold 4 (%)</td>
                <td>Fold 5 (%)</td>
                <td>Average (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>SisFall</td>
                <td>99.29</td>
                <td>99.01</td>
                <td>99.57</td>
                <td>98.86</td>
                <td>98.72</td>
                <td>99.09</td>
              </tr>
              <tr valign="top">
                <td>MobiFall</td>
                <td>99.19</td>
                <td>98.36</td>
                <td>98.33</td>
                <td>98.39</td>
                <td>99.17</td>
                <td>98.69</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p><italic>F</italic><sub>1</sub>-score of the proposed dual-stream convolutional neural network self-attention model under 10-fold cross-validation.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="130"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="70"/>
            <thead>
              <tr valign="top">
                <td>Data set</td>
                <td>Fold 1 (%)</td>
                <td>Fold 2 (%)</td>
                <td>Fold 3 (%)</td>
                <td>Fold 4 (%)</td>
                <td>Fold 5 (%)</td>
                <td>Fold 6 (%)</td>
                <td>Fold 7 (%)</td>
                <td>Fold 8 (%)</td>
                <td>Fold 9 (%)</td>
                <td>Fold 10 (%)</td>
                <td>Average (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>SisFall</td>
                <td>99.15</td>
                <td>99.43</td>
                <td>99.43</td>
                <td>99.44</td>
                <td>99.72</td>
                <td>99.15</td>
                <td>99.43</td>
                <td>98.86</td>
                <td>98.58</td>
                <td>99.15</td>
                <td>99.03</td>
              </tr>
              <tr valign="top">
                <td>MobiFall</td>
                <td>98.36</td>
                <td>100</td>
                <td>98.31</td>
                <td>98.31</td>
                <td>100</td>
                <td>98.36</td>
                <td>96.67</td>
                <td>100</td>
                <td>98.36</td>
                <td>98.31</td>
                <td>98.67</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Comparison With State-of-the-Art Algorithms</title>
        <p>We compared our proposed DSCS model with state-of-the-art fall detection models. Specifically, machine learning models—including KNN [<xref ref-type="bibr" rid="ref32">32</xref>], SVM [<xref ref-type="bibr" rid="ref33">33</xref>], and DT [<xref ref-type="bibr" rid="ref25">25</xref>]—and DL models—such as CNN [<xref ref-type="bibr" rid="ref26">26</xref>], LSTM [<xref ref-type="bibr" rid="ref27">27</xref>], CNN-LSTM [<xref ref-type="bibr" rid="ref34">34</xref>], contrastive accelerometer-gyroscope embedding [<xref ref-type="bibr" rid="ref35">35</xref>], few-shot transfer learning [<xref ref-type="bibr" rid="ref42">42</xref>], and deep convolutional LSTM (DeepConvLSTM) [<xref ref-type="bibr" rid="ref43">43</xref>]—were used as benchmark models. The results in <xref ref-type="table" rid="table3">Table 3</xref> demonstrate the superior performance of the DSCS model. On the SisFall data set, the DSCS model achieved the second-best performance, only slightly behind KNN, which, however, required manual feature extraction. On the MobiFall data set, the DSCS model outperformed all state-of-the-art machine learning and DL models in terms of accuracy, recall, precision, and <italic>F</italic><sub>1</sub>-score.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Performance comparison of the proposed DSCS<sup>a</sup> model with state-of-the-art fall detection algorithms.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="30"/>
            <col width="420"/>
            <col width="0"/>
            <col width="130"/>
            <col width="0"/>
            <col width="130"/>
            <col width="0"/>
            <col width="130"/>
            <col width="0"/>
            <col width="130"/>
            <thead>
              <tr valign="top">
                <td colspan="4">Data set, algorithms, and corresponding models</td>
                <td colspan="2">Accuracy (%)</td>
                <td colspan="2">Recall (%)</td>
                <td colspan="2">Precision (%)</td>
                <td><italic>F</italic><sub>1</sub>-score (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="11">
                  <bold>SisFall</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="10">
                  <bold>ML<sup>b</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DT<sup>c</sup> [<xref ref-type="bibr" rid="ref25">25</xref>]</td>
                <td colspan="2">97.97</td>
                <td colspan="2">96.30</td>
                <td colspan="2">96.85</td>
                <td colspan="2">96.57</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>KNN<sup>d</sup> [<xref ref-type="bibr" rid="ref32">32</xref>]</td>
                <td colspan="2">99.41</td>
                <td colspan="2">99.43</td>
                <td colspan="2">98.59</td>
                <td colspan="2">99.01</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>SVM<sup>e</sup> [<xref ref-type="bibr" rid="ref33">33</xref>]</td>
                <td colspan="2">99.24</td>
                <td colspan="2">98.86</td>
                <td colspan="2">98.58</td>
                <td colspan="2">98.72</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="10">
                  <bold>DL<sup>f</sup></bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CNN<sup>g</sup> [<xref ref-type="bibr" rid="ref26">26</xref>]</td>
                <td colspan="2">97.97</td>
                <td colspan="2">97.44</td>
                <td colspan="2">95.80</td>
                <td colspan="2">96.61</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>LSTM<sup>h</sup> [<xref ref-type="bibr" rid="ref27">27</xref>]</td>
                <td colspan="2">97.80</td>
                <td colspan="2">96.01</td>
                <td colspan="2">96.56</td>
                <td colspan="2">96.29</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CNN-LSTM [<xref ref-type="bibr" rid="ref34">34</xref>]</td>
                <td colspan="2">98.73</td>
                <td colspan="2">97.15</td>
                <td colspan="2">98.55</td>
                <td colspan="2">97.85</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CAGE<sup>i</sup> [<xref ref-type="bibr" rid="ref35">35</xref>]</td>
                <td colspan="2">98.90</td>
                <td colspan="2">98.29</td>
                <td colspan="2">98.01</td>
                <td colspan="2">98.15</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>FSTL<sup>j</sup> [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td colspan="2">98.65</td>
                <td colspan="2">97.44</td>
                <td colspan="2">97.99</td>
                <td colspan="2">97.71</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DeepConvLSTM<sup>k</sup> [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td colspan="2">98.99</td>
                <td colspan="2">98.58</td>
                <td colspan="2">98.02</td>
                <td colspan="2">98.30</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DSCS</td>
                <td colspan="2">99.32</td>
                <td colspan="2">99.15</td>
                <td colspan="2">98.58</td>
                <td colspan="2">98.86</td>
              </tr>
              <tr valign="top">
                <td colspan="11">
                  <bold>MobiFall</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="10">
                  <bold>ML</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DT [<xref ref-type="bibr" rid="ref25">25</xref>]</td>
                <td colspan="2">94.74</td>
                <td colspan="2">95.08</td>
                <td colspan="2">82.86</td>
                <td colspan="2">88.55</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>KNN [<xref ref-type="bibr" rid="ref32">32</xref>]</td>
                <td colspan="2">98.60</td>
                <td colspan="2">96.72</td>
                <td colspan="2">96.72</td>
                <td colspan="2">96.72</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>SVM [<xref ref-type="bibr" rid="ref33">33</xref>]</td>
                <td colspan="2">96.14</td>
                <td colspan="2">90.16</td>
                <td colspan="2">91.67</td>
                <td colspan="2">90.91</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td colspan="10">
                  <bold>DL</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CNN [<xref ref-type="bibr" rid="ref26">26</xref>]</td>
                <td colspan="2">95.79</td>
                <td colspan="2">88.52</td>
                <td colspan="2">91.53</td>
                <td colspan="2">90.00</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>LSTM [<xref ref-type="bibr" rid="ref27">27</xref>]</td>
                <td colspan="2">94.74</td>
                <td colspan="2">85.25</td>
                <td colspan="2">89.66</td>
                <td colspan="2">87.39</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CNN-LSTM [<xref ref-type="bibr" rid="ref34">34</xref>]</td>
                <td colspan="2">98.60</td>
                <td colspan="2">96.72</td>
                <td colspan="2">96.72</td>
                <td colspan="2">96.72</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>CAGE [<xref ref-type="bibr" rid="ref35">35</xref>]</td>
                <td colspan="2">98.95</td>
                <td colspan="2">98.36</td>
                <td colspan="2">96.77</td>
                <td colspan="2">97.56</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>FSTL [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td colspan="2">97.19</td>
                <td colspan="2">95.08</td>
                <td colspan="2">92.06</td>
                <td colspan="2">93.55</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DeepConvLSTM [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td colspan="2">98.25</td>
                <td colspan="2">95.08</td>
                <td colspan="2">96.67</td>
                <td colspan="2">95.87</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>DSCS</td>
                <td colspan="2">99.65</td>
                <td colspan="2">100</td>
                <td colspan="2">98.39</td>
                <td colspan="2">99.19</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>DSCS: dual-stream convolutional neural network self-attention.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>ML: machine learning.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>DT: decision tree.</p>
            </fn>
            <fn id="table3fn4">
              <p><sup>d</sup>KNN: k-nearest neighbors.</p>
            </fn>
            <fn id="table3fn5">
              <p><sup>e</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table3fn6">
              <p><sup>f</sup>DL: deep learning.</p>
            </fn>
            <fn id="table3fn7">
              <p><sup>g</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table3fn8">
              <p><sup>h</sup>LSTM: long short-term memory.</p>
            </fn>
            <fn id="table3fn9">
              <p><sup>i</sup>CAGE: contrastive accelerometer-gyroscope embedding.</p>
            </fn>
            <fn id="table3fn10">
              <p><sup>j</sup>FSTL: few-shot transfer learning.</p>
            </fn>
            <fn id="table3fn11">
              <p><sup>k</sup>DeepConvLSTM: deep convolutional LSTM.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Effect of Low Sampling Rate</title>
        <p>To validate the performance of the DSCS model under low sampling rates, we reduced the sampling rate of accelerometer and gyroscope data to 10 Hz and 5 Hz and tested the performance of the DSCS model. <xref ref-type="table" rid="table4">Table 4</xref> presents the overall performance comparison of the DSCS model under the original sampling rate of 50 Hz and the reduced sampling rates of 10 Hz and 5 Hz. The results show that the accuracy decreased under a sampling rate of 10 Hz and further declined under a sampling rate of 5 Hz. These findings indicate that the variation in sampling rates significantly impacts the accuracy of fall detection.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Performance comparison of the proposed DSCS<sup>a</sup> model under different sampling rates.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="240"/>
            <col width="190"/>
            <col width="160"/>
            <col width="190"/>
            <col width="190"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Data set and model</td>
                <td>Accuracy (%)</td>
                <td>Recall (%)</td>
                <td>Precision (%)</td>
                <td><italic>F</italic><sub>1</sub>-score (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="6">
                  <bold>SisFall</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS</td>
                <td>99.32</td>
                <td>99.15</td>
                <td>98.58</td>
                <td>98.86</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS (10 Hz)</td>
                <td>97.63</td>
                <td>96.58</td>
                <td>95.49</td>
                <td>96.03</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS (5 Hz)</td>
                <td>95.60</td>
                <td>92.88</td>
                <td>92.35</td>
                <td>92.61</td>
              </tr>
              <tr valign="top">
                <td colspan="6">
                  <bold>MobiFall</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS</td>
                <td>99.65</td>
                <td>100</td>
                <td>98.39</td>
                <td>99.19</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS (10 Hz)</td>
                <td>96.49</td>
                <td>95.08</td>
                <td>89.23</td>
                <td>92.06</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>DSCS (5 Hz)</td>
                <td>94.04</td>
                <td>86.89</td>
                <td>85.48</td>
                <td>86.18</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>DSCS: dual-stream convolutional neural network self-attention.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Effect of SA Mechanism</title>
        <p>To further explore the data distribution of feature vectors before and after passing through the SA module, we used the t-distributed stochastic neighbor embedding (t-SNE) algorithm for visualizing the test sets from SisFall and MobiFall. The t-SNE algorithm uses nonlinear dimensionality reduction to map the original 64D feature vectors to a more intuitive 2D space while preserving the similarity relationships among data points. The resulting visualization of data distribution through t-SNE allows us to assess the clustering and dispersion of different data categories (fall or ADL) in the 2D space, providing a better understanding and comparison of the separation between fall and ADL. As shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>, after passing through the SA module, the feature vectors of different categories became more dispersed. This illustrates why the proposed DSCS algorithm performs better in fall detection and further confirms the effectiveness of the introduced SA mechanism.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>t-SNE plot of (A) SisFall: before SA, (B) SisFall: after SA, (C) MobiFall: before SA, and (D) MobiFall: after SA. ADL: activities of daily life; SA: self-attention; t-SNE: t-distributed stochastic neighbor embedding.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e56750_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Practical Validation of DSCS</title>
        <p>Beyond publicly available data sets, we conducted practical validation on the DSCS model, to assess its generalizability to new data and new users. Specifically, we embedded the DSCS model onto a smartwatch (Huawei Watch 3, Huawei Tech Co, Ltd) equipped with accelerometers and gyroscopes and developed a fall detection alert application. The sensors in the smartwatch operated at a sampling rate of 50 Hz. As depicted in <xref rid="figure5" ref-type="fig">Figure 5</xref>A, participants wore smartwatches during experiments. Every 0.5 seconds, the data from the past 8 seconds (400 sampling points) was fed into the fall detection algorithm. When the algorithm detected a fall, a fall alert page, as shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>B, was triggered.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Illustration of practical validation details. (A) Each participant wore a watch. (B) Watch alarm page: prompting detection of fall behavior.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e56750_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We recruited 10 healthy students from the Capital University of Physical Education and Sports, Beijing, China, as participants. The demographics of the participants are detailed in <xref ref-type="table" rid="table5">Table 5</xref>. Each participant wore a smartwatch and performed 8 fall activities and 9 fall-like activities. The eight fall activities were as follows: (1) fall forward, (2) fall backward, (3) fall to the left, (4) fall to the right, (5) fall from chairs, (6) fall while walking, (7) fall while running, and (8) fall while riding. The nine fall-like activities were as follows: (1) walking, (2) jogging, (3) sprinting, (4) standing and then sitting heavily in a chair, (5) bending down to tie shoelaces, (6) stretching and dropping hands, (7) long jump, (8) descending stairs, and (9) free fall on a trampoline. <xref rid="figure6" ref-type="fig">Figure 6</xref> depicts participants wearing a smartwatch while performing fall activities during the validation process.</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Basic information of the 10 participants.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="200"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <col width="80"/>
            <thead>
              <tr valign="top">
                <td>Information</td>
                <td colspan="10">Participant</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>4</td>
                <td>5</td>
                <td>6</td>
                <td>7</td>
                <td>8</td>
                <td>9</td>
                <td>10</td>
              </tr>
              <tr valign="top">
                <td>Age (years)</td>
                <td>24</td>
                <td>25</td>
                <td>24</td>
                <td>25</td>
                <td>35</td>
                <td>26</td>
                <td>25</td>
                <td>26</td>
                <td>27</td>
                <td>35</td>
              </tr>
              <tr valign="top">
                <td>Sex</td>
                <td>Female</td>
                <td>Male</td>
                <td>Male</td>
                <td>Female</td>
                <td>Male</td>
                <td>Male</td>
                <td>Male</td>
                <td>Female</td>
                <td>Male</td>
                <td>Male</td>
              </tr>
              <tr valign="top">
                <td>Height (cm)</td>
                <td>170</td>
                <td>175</td>
                <td>183</td>
                <td>168</td>
                <td>177</td>
                <td>180</td>
                <td>179</td>
                <td>168</td>
                <td>178</td>
                <td>182</td>
              </tr>
              <tr valign="top">
                <td>Weight (kg)</td>
                <td>53</td>
                <td>65</td>
                <td>92</td>
                <td>70</td>
                <td>75</td>
                <td>76</td>
                <td>75</td>
                <td>55</td>
                <td>70</td>
                <td>80</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>The 4 typical fall activities performed by the participants.</p>
          </caption>
          <graphic xlink:href="jmir_v26i1e56750_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Each activity was repeated in 100 trials, resulting in a total of 1700 trials (800 fall trials and 900 fall-like trials). The DSCS model achieved satisfactory performance in practical validation, with accuracy, recall, and specificity of 96.41%, 95.12%, and 97.55%, respectively. <xref ref-type="table" rid="table6">Table 6</xref> illustrates the recall performance for fall activities and the specificity performance for fall-like activities. The experimental results indicate that the DSCS model maintains a robust fall detection performance in practical applications and possesses satisfactory generalization ability.</p>
        <table-wrap position="float" id="table6">
          <label>Table 6</label>
          <caption>
            <p>Performance validation of different falls and fall-like activities of daily life.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="580"/>
            <col width="170"/>
            <col width="220"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Performance validation and activity</td>
                <td>Recall (%)</td>
                <td>Specificity (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="4">
                  <bold>Recall performance</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall forward</td>
                <td>97</td>
                <td>—<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall to the left</td>
                <td>96</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall to the right</td>
                <td>96</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall from chairs</td>
                <td>96</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall while running</td>
                <td>96</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall while walking</td>
                <td>94</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall while riding</td>
                <td>93</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Fall backward</td>
                <td>93</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td colspan="4">
                  <bold>Specificity performance</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Descending stairs</td>
                <td>—</td>
                <td>100</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Stand and then sit heavily in a chair</td>
                <td>—</td>
                <td>100</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Jogging</td>
                <td>—</td>
                <td>100</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Walking</td>
                <td>—</td>
                <td>100</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Bend down to tie shoelaces</td>
                <td>—</td>
                <td>99</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Stretch and drop hands</td>
                <td>—</td>
                <td>97</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Long jump</td>
                <td>—</td>
                <td>95</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Sprinting</td>
                <td>—</td>
                <td>95</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Free fall on a trampoline</td>
                <td>—</td>
                <td>92</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table6fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>In this paper, we proposed a DSCS model for fall detection. The proposed model not only performs well on the public SisFall and MobiFall data sets but also excels in practical validation. By comparing the performance of fall recognition results, we found that the dual-stream mechanism can effectively improve the accuracy of classification. At the same time, from the visualization results of data distribution, it can be observed that after passing through the SA module, the distance between fall data and ADL data becomes larger, which is beneficial for accurately distinguishing falls and ADLs.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>Unlike previous studies, this paper integrates the dual-stream data processing architecture and SA module for the first time. Compared with other DL schemes, such as CNN, LSTM, CNN-LSTM, contrastive accelerometer-gyroscope embedding, few-shot transfer learning, and DeepConvLSTM, the proposed scheme in this paper can automatically focus on the features closely related to fall behavior, to achieve better classification performance. In Wang et al [<xref ref-type="bibr" rid="ref44">44</xref>], the attention mechanism is introduced into the fall detection algorithm. However, in the feature extraction stage, it still uses a machine learning algorithm for manual extraction. Comparative research shows that on the SisFall data set, the proposed model achieved the second-best accuracy (second only to KNN); on the MobiFall data set, the proposed model achieved the best accuracy, recall, and precision.</p>
      </sec>
      <sec>
        <title>Performance Gap Between Public Data Sets and Practical Validation</title>
        <p>Beyond public data sets, we embedded the proposed model into smartwatches and developed fall detection and alarm software to verify the fall detection algorithm proposed in this paper in practical applications. According to experimental results, we found that the performance of the model in practical validation dropped, but it was still at a good level. Specifically, accuracy degraded to 96.41%, with recall and specificity being 95.12% and 97.55%, respectively. The main reasons for the performance degradation are differences in sensor models and sampling frequencies, differences in participant conditions, and variations in participant movement norms.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Our research has a few limitations. First, due to practical constraints in data collection, the training and test data sets used in this study may not encompass all possible fall poses, introducing uncertainty regarding the model’s performance in the presence of unique or novel fall scenarios. Second, for safety considerations, the exclusion of older adult data from both public data sets and actual validations during the training and testing of fall detection may compromise the model’s accuracy in handling falls among older adults, who constitute the most vulnerable demographic.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In this paper, we present the DSCS model, a DL framework designed for fall detection. The DSCS uses a dual-stream architecture incorporating both acceleration and gyroscope data, followed by a 3-layer CNN, SA mechanism, and classification modules. The DSCS model outperforms state-of-the-art algorithms, achieving detection accuracies of 99.32% and 99.65% on the SisFall and MobiFall data sets, respectively. Furthermore, the model maintains a high practical validation accuracy of 96.41%. These results demonstrate the effectiveness of the CNN-SA architecture for classification tasks. This study also highlights how incorporating SA into FDSs can improve classification accuracy by focusing on critical segments of data that significantly contribute to distinguishing between falls and fall-like activities.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">ADL</term>
          <def>
            <p>activities of daily life</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BN</term>
          <def>
            <p>batch normalization</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">DeepConvLSTM</term>
          <def>
            <p>deep convolutional LSTM</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">DSCS</term>
          <def>
            <p>dual-stream convolutional neural network self-attention</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">DT</term>
          <def>
            <p>decision tree</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">FDS</term>
          <def>
            <p>fall detection system</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">KNN</term>
          <def>
            <p>k-nearest neighbor</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">LSTM</term>
          <def>
            <p>long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">ReLU</term>
          <def>
            <p>rectified linear unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">SA</term>
          <def>
            <p>self-attention</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">t-SNE</term>
          <def>
            <p>t-distributed stochastic neighbor embedding</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was funded by the National Key Research and Development Program of China (2020YFC2006200) and the Emerging Interdisciplinary Platform for Medicine and Engineering in Sports (EIPMES). This work was also supported by the China Postdoctoral Science Foundation under grant number 2024M750191.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Falls</article-title>
          <source>World Health Organization</source>
          <year>2021</year>
          <access-date>2024-06-22</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.who.int/news-room/fact-sheets/detail/falls">https://www.who.int/news-room/fact-sheets/detail/falls</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bergen</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Stevens</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Burns</surname>
              <given-names>ER</given-names>
            </name>
          </person-group>
          <article-title>Falls and fall injuries among adults aged ≥65 years–United States, 2014</article-title>
          <source>MMWR Morb Mortal Wkly Rep</source>
          <year>2016</year>
          <volume>65</volume>
          <issue>37</issue>
          <fpage>993</fpage>
          <lpage>998</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.15585/mmwr.mm6537a2"/>
          </comment>
          <pub-id pub-id-type="doi">10.15585/mmwr.mm6537a2</pub-id>
          <pub-id pub-id-type="medline">27656914</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="book">
          <source>Global Report on Aging and Health</source>
          <year>2015</year>
          <publisher-loc>USA</publisher-loc>
          <publisher-name>World Health Organization</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>A review of wearable sensors based fall-related recognition systems</article-title>
          <source>Eng Appl Artif Intell</source>
          <year>2023</year>
          <volume>121</volume>
          <fpage>105993</fpage>
          <pub-id pub-id-type="doi">10.1016/j.engappai.2023.105993</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ferreira de Sousa</surname>
              <given-names>FAS</given-names>
            </name>
            <name name-style="western">
              <surname>Escriba</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Avina Bravo</surname>
              <given-names>EG</given-names>
            </name>
            <name name-style="western">
              <surname>Brossa</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Fourniols</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rossi</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Wearable pre-impact fall detection system based on 3D accelerometer and subject’s height</article-title>
          <source>IEEE Sensors J</source>
          <year>2022</year>
          <volume>22</volume>
          <issue>2</issue>
          <fpage>1738</fpage>
          <lpage>1745</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2021.3131037</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Nam</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Enhanced algorithm for the detection of preimpact fall for wearable airbags</article-title>
          <source>Sensors</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>5</issue>
          <fpage>1277</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s20051277"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s20051277</pub-id>
          <pub-id pub-id-type="medline">32111090</pub-id>
          <pub-id pub-id-type="pii">s20051277</pub-id>
          <pub-id pub-id-type="pmcid">PMC7085770</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Semwal</surname>
              <given-names>VB</given-names>
            </name>
          </person-group>
          <article-title>A novel feature extraction method for preimpact fall detection system using deep learning and wearable sensors</article-title>
          <source>IEEE Sensors J</source>
          <year>2022</year>
          <volume>22</volume>
          <issue>23</issue>
          <fpage>22943</fpage>
          <lpage>22951</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2022.3213814</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhong</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>WWY</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nugent</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Shewell</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Medina-Quero</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Multi-occupancy fall detection using non-invasive thermal vision sensor</article-title>
          <source>IEEE Sensors J</source>
          <year>2021</year>
          <volume>21</volume>
          <issue>4</issue>
          <fpage>5377</fpage>
          <lpage>5388</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2020.3032728</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Inturi</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Manikandan</surname>
              <given-names>VM</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>MN</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Synergistic integration of skeletal kinematic features for vision-based fall detection</article-title>
          <source>Sensors</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>14</issue>
          <fpage>6283</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s23146283"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s23146283</pub-id>
          <pub-id pub-id-type="medline">37514578</pub-id>
          <pub-id pub-id-type="pii">s23146283</pub-id>
          <pub-id pub-id-type="pmcid">PMC10385725</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>A WiFi-based smart home fall detection system using recurrent neural network</article-title>
          <source>IEEE Trans Consumer Electron</source>
          <year>2020</year>
          <volume>66</volume>
          <issue>4</issue>
          <fpage>308</fpage>
          <lpage>317</lpage>
          <pub-id pub-id-type="doi">10.1109/tce.2020.3021398</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sadreazami</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Bolic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rajan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Fall detection using standoff radar-based sensing and deep convolutional neural network</article-title>
          <source>IEEE Trans Circuits Syst II Express Briefs</source>
          <year>2020</year>
          <volume>67</volume>
          <issue>1</issue>
          <fpage>197</fpage>
          <lpage>201</lpage>
          <pub-id pub-id-type="doi">10.1109/tcsii.2019.2904498</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adnan</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Irtaza</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Aziz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ullah</surname>
              <given-names>MO</given-names>
            </name>
            <name name-style="western">
              <surname>Javed</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmood</surname>
              <given-names>MT</given-names>
            </name>
          </person-group>
          <article-title>Fall detection through acoustic local ternary patterns</article-title>
          <source>Appl Acoust</source>
          <year>2018</year>
          <volume>140</volume>
          <fpage>296</fpage>
          <lpage>300</lpage>
          <pub-id pub-id-type="doi">10.1016/j.apacoust.2018.06.013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of inertial sensor–based pre-impact fall detection algorithms using public dataset</article-title>
          <source>Sensors</source>
          <year>2019</year>
          <volume>19</volume>
          <issue>4</issue>
          <fpage>774</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s19040774"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s19040774</pub-id>
          <pub-id pub-id-type="medline">30781886</pub-id>
          <pub-id pub-id-type="pii">s19040774</pub-id>
          <pub-id pub-id-type="pmcid">PMC6412321</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siwadamrongpong</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chinrungrueng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hasegawa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nantajeewarawat</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Fall detection and prediction based on IMU and EMG sensors for elders</article-title>
          <year>2022</year>
          <month>06</month>
          <conf-name>19th International Joint Conference on Computer Science and Software Engineering (JCSSE)</conf-name>
          <conf-date>June 22-25, 2022</conf-date>
          <conf-loc>Bangkok, Thailand</conf-loc>
          <pub-id pub-id-type="doi">10.1109/jcsse54890.2022.9836284</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ngu</surname>
              <given-names>AH</given-names>
            </name>
            <name name-style="western">
              <surname>Metsis</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Coyne</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Srinivas</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Salad</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmud</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Chee</surname>
              <given-names>KH</given-names>
            </name>
          </person-group>
          <article-title>Personalized watch-based fall detection using a collaborative edge-cloud framework</article-title>
          <source>Int J Neural Syst</source>
          <year>2022</year>
          <volume>32</volume>
          <issue>12</issue>
          <fpage>2250048</fpage>
          <pub-id pub-id-type="doi">10.1142/S0129065722500484</pub-id>
          <pub-id pub-id-type="medline">35972790</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Intelligent fall detection method based on accelerometer data from a wrist-worn smart watch</article-title>
          <source>Measurement</source>
          <year>2019</year>
          <volume>140</volume>
          <fpage>215</fpage>
          <lpage>226</lpage>
          <pub-id pub-id-type="doi">10.1016/j.measurement.2019.03.079</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Şengül</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Karakaya</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Abayomi-Alli</surname>
              <given-names>OO</given-names>
            </name>
            <name name-style="western">
              <surname>Damaševičius</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Deep learning based fall detection using smartwatches for healthcare applications</article-title>
          <source>Biomed Signal Process Control</source>
          <year>2022</year>
          <volume>71</volume>
          <fpage>103242</fpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2021.103242</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shawen</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lonini</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mummidisetty</surname>
              <given-names>CK</given-names>
            </name>
            <name name-style="western">
              <surname>Shparii</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Albert</surname>
              <given-names>MV</given-names>
            </name>
            <name name-style="western">
              <surname>Kording</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Jayaraman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Metadata correction of: fall detection in individuals with lower limb amputations using mobile phones: machine learning enhances robustness for real-world applications</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2017</year>
          <volume>5</volume>
          <issue>12</issue>
          <fpage>e167</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2017/12/e167/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/mhealth.9177</pub-id>
          <pub-id pub-id-type="medline">29261509</pub-id>
          <pub-id pub-id-type="pii">v5i12e167</pub-id>
          <pub-id pub-id-type="pmcid">PMC5737617</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shahzad</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>FallDroid: an automated smart-phone-based fall detection system using multiple kernel learning</article-title>
          <source>IEEE Trans Ind Inf</source>
          <year>2019</year>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>35</fpage>
          <lpage>44</lpage>
          <pub-id pub-id-type="doi">10.1109/tii.2018.2839749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kerdegari</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mokaram</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Samsudin</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ramli</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>A pervasive neural network based fall detection system on smart phone</article-title>
          <source>J Ambient Intell Smart Environ</source>
          <year>2015</year>
          <volume>7</volume>
          <issue>2</issue>
          <fpage>221</fpage>
          <lpage>230</lpage>
          <pub-id pub-id-type="doi">10.3233/ais-150306</pub-id>
          <pub-id pub-id-type="pii">ADR249002</pub-id>
          <pub-id pub-id-type="pmcid">PMC11091731</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karar</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Shehata</surname>
              <given-names>HI</given-names>
            </name>
            <name name-style="western">
              <surname>Reyad</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>A survey of IoT-based fall detection for aiding elderly care: sensors, methods, challenges and future trends</article-title>
          <source>Appl Sci</source>
          <year>2022</year>
          <volume>12</volume>
          <issue>7</issue>
          <fpage>3276</fpage>
          <pub-id pub-id-type="doi">10.3390/app12073276</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahmad Qureshi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Rafique</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sohail</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Aman</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Rahat Abbas</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Basit</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Khalid</surname>
              <given-names>MI</given-names>
            </name>
          </person-group>
          <article-title>EML-PSP: a novel ensemble machine learning-based physical security paradigm using cross-domain ultra-fused feature extraction with hybrid data augmentation scheme</article-title>
          <source>Expert Syst Appl</source>
          <year>2024</year>
          <volume>243</volume>
          <fpage>122863</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2023.122863</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saadeh</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Butt</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Altaf</surname>
              <given-names>MAB</given-names>
            </name>
          </person-group>
          <article-title>A patient-specific single sensor IoT-based wearable fall prediction and detection system</article-title>
          <source>IEEE Trans Neural Syst Rehabil Eng</source>
          <year>2019</year>
          <volume>27</volume>
          <issue>5</issue>
          <fpage>995</fpage>
          <lpage>1003</lpage>
          <pub-id pub-id-type="doi">10.1109/TNSRE.2019.2911602</pub-id>
          <pub-id pub-id-type="medline">30998473</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alizadeh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bogdan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Classen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fricke</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Support vector machine classifiers show high generalizability in automatic fall detection in older adults</article-title>
          <source>Sensors</source>
          <year>2021</year>
          <volume>21</volume>
          <issue>21</issue>
          <fpage>7166</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s21217166"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s21217166</pub-id>
          <pub-id pub-id-type="medline">34770473</pub-id>
          <pub-id pub-id-type="pii">s21217166</pub-id>
          <pub-id pub-id-type="pmcid">PMC8588363</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nahian</surname>
              <given-names>MJA</given-names>
            </name>
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Banna</surname>
              <given-names>MHA</given-names>
            </name>
            <name name-style="western">
              <surname>Aseeri</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Uddin</surname>
              <given-names>MN</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmed</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Mahmud</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Towards an accelerometer-based elderly fall detection system using cross-disciplinary time series features</article-title>
          <source>IEEE Access</source>
          <year>2021</year>
          <volume>9</volume>
          <fpage>39413</fpage>
          <lpage>39431</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2021.3056441</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Casilari</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lora-Rivera</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>García-Lagos</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>A study on the application of convolutional neural networks to fall detection evaluated with multiple public datasets</article-title>
          <source>Sensors</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>5</issue>
          <fpage>1466</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s20051466"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s20051466</pub-id>
          <pub-id pub-id-type="medline">32155936</pub-id>
          <pub-id pub-id-type="pii">s20051466</pub-id>
          <pub-id pub-id-type="pmcid">PMC7085732</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luna-Perejón</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Domínguez-Morales</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Civit-Balcells</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Wearable fall detector using recurrent neural networks</article-title>
          <source>Sensors</source>
          <year>2019</year>
          <volume>19</volume>
          <issue>22</issue>
          <fpage>4885</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s19224885"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s19224885</pub-id>
          <pub-id pub-id-type="medline">31717442</pub-id>
          <pub-id pub-id-type="pii">s19224885</pub-id>
          <pub-id pub-id-type="pmcid">PMC6891713</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Efficient fall detection in four directions based on smart insoles and RDAE-LSTM model</article-title>
          <source>Expert Syst Appl</source>
          <year>2022</year>
          <volume>205</volume>
          <fpage>117661</fpage>
          <pub-id pub-id-type="doi">10.1016/j.eswa.2022.117661</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ellul</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Azzopardi</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Elderly fall detection systems: a literature survey</article-title>
          <source>Front Robot AI</source>
          <year>2020</year>
          <volume>7</volume>
          <fpage>71</fpage>
          <pub-id pub-id-type="doi">10.3389/frobt.2020.00071</pub-id>
          <pub-id pub-id-type="medline">33501238</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alarifi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alwadain</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Killer heuristic optimized convolution neural network-based fall detection with wearable IoT sensor devices</article-title>
          <source>Measurement</source>
          <year>2021</year>
          <volume>167</volume>
          <fpage>108258</fpage>
          <pub-id pub-id-type="doi">10.1016/j.measurement.2020.108258</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Heo</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mun</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Acceleration magnitude at impact following loss of balance can be estimated using deep learning model</article-title>
          <source>Sensors</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>21</issue>
          <fpage>6126</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s20216126"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s20216126</pub-id>
          <pub-id pub-id-type="medline">33126491</pub-id>
          <pub-id pub-id-type="pii">s20216126</pub-id>
          <pub-id pub-id-type="pmcid">PMC7663134</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hussain</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ehatisham-ul-Haq</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Azam</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Activity-aware fall detection and recognition based on wearable sensors</article-title>
          <source>IEEE Sensors J</source>
          <year>2019</year>
          <volume>19</volume>
          <issue>12</issue>
          <fpage>4528</fpage>
          <lpage>4536</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2019.2898891</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Son</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>HB</given-names>
            </name>
          </person-group>
          <article-title>A machine learning approach for the classification of falls and activities of daily living in agricultural workers</article-title>
          <source>IEEE Access</source>
          <year>2022</year>
          <volume>10</volume>
          <fpage>77418</fpage>
          <lpage>77431</lpage>
          <pub-id pub-id-type="doi">10.1109/ACCESS.2022.3190618</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Hou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lungu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>An energy-efficient fall detection method based on FD-DNN for elderly people</article-title>
          <source>Sensors</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>15</issue>
          <fpage>4192</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s20154192"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s20154192</pub-id>
          <pub-id pub-id-type="medline">32731465</pub-id>
          <pub-id pub-id-type="pii">s20154192</pub-id>
          <pub-id pub-id-type="pmcid">PMC7435651</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koo</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Contrastive accelerometer-gyroscope embedding model for human activity recognition</article-title>
          <source>IEEE Sensors J</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>506</fpage>
          <lpage>513</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2022.3222825</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vaswani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shazeer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Parmar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Uszkoreit</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jones</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gomez</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Kaiser</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Polosukhin</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Attention is all you need</article-title>
          <year>2017</year>
          <month>12</month>
          <conf-name>Proceedings of the 31st International Conference on Neural Information Processing Systems</conf-name>
          <conf-date>December 4-9, 2017</conf-date>
          <conf-loc>Long Beach, CA, USA</conf-loc>
          <fpage>5998</fpage>
          <lpage>6008</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>BAFNet: bottleneck attention based fusion network for sleep apnea detection</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2024</year>
          <volume>28</volume>
          <issue>5</issue>
          <fpage>2473</fpage>
          <lpage>2484</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2023.3278657</pub-id>
          <pub-id pub-id-type="medline">37216250</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>IU</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>JW</given-names>
            </name>
          </person-group>
          <article-title>PAR-Net: an enhanced dual-stream CNN-ESN architecture for human physical activity recognition</article-title>
          <source>Sensors</source>
          <year>2024</year>
          <volume>24</volume>
          <issue>6</issue>
          <fpage>1908</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s24061908"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s24061908</pub-id>
          <pub-id pub-id-type="medline">38544172</pub-id>
          <pub-id pub-id-type="pii">s24061908</pub-id>
          <pub-id pub-id-type="pmcid">PMC10974682</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sucerquia</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>López</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Vargas-Bonilla</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>SisFall: a fall and movement dataset</article-title>
          <source>Sensors</source>
          <year>2017</year>
          <volume>17</volume>
          <issue>1</issue>
          <fpage>198</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s17010198"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s17010198</pub-id>
          <pub-id pub-id-type="medline">28117691</pub-id>
          <pub-id pub-id-type="pii">s17010198</pub-id>
          <pub-id pub-id-type="pmcid">PMC5298771</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vavoulas</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Pediaditis</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Spanakis</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Tsiknakis</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The MobiFall dataset: an initial evaluation of fall detection algorithms using smartphones</article-title>
          <year>2013</year>
          <month>11</month>
          <conf-name>IEEE 13th International Conference on Bioinformatics and Bioengineering (BIBE)</conf-name>
          <conf-date>November 10-13, 2013</conf-date>
          <conf-loc>Chania, Greece</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <pub-id pub-id-type="doi">10.1109/bibe.2013.6701629</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kyung</surname>
              <given-names>CM</given-names>
            </name>
          </person-group>
          <article-title>Design and implementation of practical step detection algorithm for wrist-worn devices</article-title>
          <source>IEEE Sensors J</source>
          <year>2016</year>
          <volume>16</volume>
          <issue>21</issue>
          <fpage>7720</fpage>
          <lpage>7730</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2016.2603163</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ganesha</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Rajan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Few-shot transfer learning for wearable IMU-based human activity recognition</article-title>
          <source>Neural Comput Appl</source>
          <year>2024</year>
          <volume>36</volume>
          <issue>18</issue>
          <fpage>10811</fpage>
          <lpage>10823</lpage>
          <pub-id pub-id-type="doi">10.1007/s00521-024-09645-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hellmers</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krey</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gashi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Koschate</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Stuckenschneider</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hein</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zieschang</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Comparison of machine learning approaches for near-fall-detection with motion sensors</article-title>
          <source>Front Digit Health</source>
          <year>2023</year>
          <volume>5</volume>
          <fpage>1223845</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37564882"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2023.1223845</pub-id>
          <pub-id pub-id-type="medline">37564882</pub-id>
          <pub-id pub-id-type="pmcid">PMC10410450</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>CMFALL: a cascade and parallel multi-state fall detection algorithm using waist-mounted tri-axial accelerometer signals</article-title>
          <source>IEEE Trans Consumer Electron</source>
          <year>2020</year>
          <volume>66</volume>
          <issue>3</issue>
          <fpage>261</fpage>
          <lpage>270</lpage>
          <pub-id pub-id-type="doi">10.1109/tce.2020.3000338</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
