<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/signals">
		<title>Signals</title>
		<description>Latest open access articles published in Signals at https://www.mdpi.com/journal/signals</description>
		<link>https://www.mdpi.com/journal/signals</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/signals"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1778841406"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/3/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/7/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/74" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/73" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/72" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/71" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/70" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/69" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/68" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/67" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/66" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/65" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/64" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/63" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/62" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/61" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/60" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/59" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/58" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/57" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/56" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/55" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/54" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/53" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/4/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/3/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-6120/6/2/21" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/46">

	<title>Signals, Vol. 7, Pages 46: Exploratory Analysis of Electroencephalography Characteristics Shared by Major Depressive Disorder and Parkinson&amp;rsquo;s Disease: A Database Study</title>
	<link>https://www.mdpi.com/2624-6120/7/3/46</link>
	<description>Despite being distinct clinical entities, major depressive disorder (MDD) and Parkinson&amp;rsquo;s disease (PD) have some shared physiological pathways, including mitochondrial dysfunction and inflammation. Our interest was whether these common physiological mechanisms are reflected in brain activity variations as well. Therefore, this study aimed to identify common characteristics in resting-state electroencephalography (EEG) between the conditions by comparing features among patients with MDD, PD, and healthy controls. The methodology comprised two stages: analyzing differences between patients and healthy individuals and exploring consistent trends between PD and MDD, based on EEG data from PRED + CT database. Age-corrected regression analysis of five EEG features revealed PD and MDD had the following overlapping features: shared abnormalities in theta, alpha and beta relative power, as well as sample entropy in the delta (centroparietal, temporal, and parietal areas), theta (parieto-occipital), and gamma (central) bands. Furthermore, interhemispheric asymmetry was evident across all bands, especially in the frontal and centroparietal regions. When combining these findings with their directional trends (positive or negative), common EEG features included increased theta and decreased alpha-beta power, along with increased parieto-occipital and reduced gamma entropy at FCz. These findings suggest shared EEG markers between PD and MDD, supporting the potential for efficient neurological disorder diagnosis.</description>
	<pubDate>2026-05-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 46: Exploratory Analysis of Electroencephalography Characteristics Shared by Major Depressive Disorder and Parkinson&amp;rsquo;s Disease: A Database Study</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/46">doi: 10.3390/signals7030046</a></p>
	<p>Authors:
		Chia-Yen Yang
		Fan-Ning Kuo
		Hsin-Yung Chen
		</p>
	<p>Despite being distinct clinical entities, major depressive disorder (MDD) and Parkinson&amp;rsquo;s disease (PD) have some shared physiological pathways, including mitochondrial dysfunction and inflammation. Our interest was whether these common physiological mechanisms are reflected in brain activity variations as well. Therefore, this study aimed to identify common characteristics in resting-state electroencephalography (EEG) between the conditions by comparing features among patients with MDD, PD, and healthy controls. The methodology comprised two stages: analyzing differences between patients and healthy individuals and exploring consistent trends between PD and MDD, based on EEG data from PRED + CT database. Age-corrected regression analysis of five EEG features revealed PD and MDD had the following overlapping features: shared abnormalities in theta, alpha and beta relative power, as well as sample entropy in the delta (centroparietal, temporal, and parietal areas), theta (parieto-occipital), and gamma (central) bands. Furthermore, interhemispheric asymmetry was evident across all bands, especially in the frontal and centroparietal regions. When combining these findings with their directional trends (positive or negative), common EEG features included increased theta and decreased alpha-beta power, along with increased parieto-occipital and reduced gamma entropy at FCz. These findings suggest shared EEG markers between PD and MDD, supporting the potential for efficient neurological disorder diagnosis.</p>
	]]></content:encoded>

	<dc:title>Exploratory Analysis of Electroencephalography Characteristics Shared by Major Depressive Disorder and Parkinson&amp;rsquo;s Disease: A Database Study</dc:title>
			<dc:creator>Chia-Yen Yang</dc:creator>
			<dc:creator>Fan-Ning Kuo</dc:creator>
			<dc:creator>Hsin-Yung Chen</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030046</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/signals7030046</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/45">

	<title>Signals, Vol. 7, Pages 45: High-Frequency Infrared Thermography Reveals Short-Term Pressure Variations in CO2 Natural Vents at Mefite d&amp;rsquo;Ansanto (Italy)</title>
	<link>https://www.mdpi.com/2624-6120/7/3/45</link>
	<description>A thermal infrared (TIR) camera was installed at Mefite Lake in Valle d&amp;rsquo;Ansanto, Irpinia (Italy), to assess whether small variations in cold CO2 flux can be resolved thermally. To our knowledge, this is the first systematic attempt to extract short-period degassing dynamics from TIR data at Mefite. Infrared thermal images taken over a three-hour nighttime interval revealed the spatial distribution and extent of natural CO2 emissions. The high sampling frequency of one minute detected unexpected thermal variability from the source. The extent of temperature variations across the entire site reached almost 3 &amp;deg;C, with durations typically ranging from a few minutes to tens of minutes. Spectral analysis of the temperature time series reported a 1/f-type noise pattern, with significant periods of 2&amp;ndash;3 min, 5 min, 26 min, and 61 min observed at different locations. Further intermediate periods were observed at individual points. Differences and delays in temperature variations appeared to be related to distance from the structure&amp;rsquo;s centre and the presence of water. These temperature fluctuations were interpreted as changes in the gaseous emission flow caused by a few kPa of CO2 escaping due to pressure variations. The gas thermally interacts with the underlying soil, adding or removing heat at the surface. These results demonstrate that high-frequency infrared thermography provides a sensitive and practical tool for quantifying short-term flux variability at natural CO2 vents and for improving the characterisation of their degassing dynamics.</description>
	<pubDate>2026-05-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 45: High-Frequency Infrared Thermography Reveals Short-Term Pressure Variations in CO2 Natural Vents at Mefite d&amp;rsquo;Ansanto (Italy)</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/45">doi: 10.3390/signals7030045</a></p>
	<p>Authors:
		Cristiano Fidani
		Alessandro Piscini
		Massimo Calcara
		Gianfranco Cianchini
		Maurizio Soldani
		Angelo De Santis
		Dario Sabbagh
		Martina Orlando
		Loredana Perrone
		</p>
	<p>A thermal infrared (TIR) camera was installed at Mefite Lake in Valle d&amp;rsquo;Ansanto, Irpinia (Italy), to assess whether small variations in cold CO2 flux can be resolved thermally. To our knowledge, this is the first systematic attempt to extract short-period degassing dynamics from TIR data at Mefite. Infrared thermal images taken over a three-hour nighttime interval revealed the spatial distribution and extent of natural CO2 emissions. The high sampling frequency of one minute detected unexpected thermal variability from the source. The extent of temperature variations across the entire site reached almost 3 &amp;deg;C, with durations typically ranging from a few minutes to tens of minutes. Spectral analysis of the temperature time series reported a 1/f-type noise pattern, with significant periods of 2&amp;ndash;3 min, 5 min, 26 min, and 61 min observed at different locations. Further intermediate periods were observed at individual points. Differences and delays in temperature variations appeared to be related to distance from the structure&amp;rsquo;s centre and the presence of water. These temperature fluctuations were interpreted as changes in the gaseous emission flow caused by a few kPa of CO2 escaping due to pressure variations. The gas thermally interacts with the underlying soil, adding or removing heat at the surface. These results demonstrate that high-frequency infrared thermography provides a sensitive and practical tool for quantifying short-term flux variability at natural CO2 vents and for improving the characterisation of their degassing dynamics.</p>
	]]></content:encoded>

	<dc:title>High-Frequency Infrared Thermography Reveals Short-Term Pressure Variations in CO2 Natural Vents at Mefite d&amp;rsquo;Ansanto (Italy)</dc:title>
			<dc:creator>Cristiano Fidani</dc:creator>
			<dc:creator>Alessandro Piscini</dc:creator>
			<dc:creator>Massimo Calcara</dc:creator>
			<dc:creator>Gianfranco Cianchini</dc:creator>
			<dc:creator>Maurizio Soldani</dc:creator>
			<dc:creator>Angelo De Santis</dc:creator>
			<dc:creator>Dario Sabbagh</dc:creator>
			<dc:creator>Martina Orlando</dc:creator>
			<dc:creator>Loredana Perrone</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030045</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/signals7030045</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/44">

	<title>Signals, Vol. 7, Pages 44: Investigating Sibilant Fricative Representation in Bangla Telemedicine Speech: A Cost-Aware Sampling Rate Optimization Study</title>
	<link>https://www.mdpi.com/2624-6120/7/3/44</link>
	<description>Automatic speech recognition has advanced rapidly for high-resource languages, yet performance remains limited for low-resource languages such as Bangla, particularly in telehealth settings. Most systems rely on a standardized 16 kHz sampling rate, a design choice despite evidence that Bangla contains sibilant fricatives and other phonetic cues with substantial high-frequency energy that may be suppressed under bandwidth and latency constraints. This study evaluates audio sampling rate as a controllable signal-level parameter for Bangla telehealth ASR to identify an empirically grounded operating range balancing transcription accuracy, execution time, and network bandwidth. Twenty real-world Bangla doctor&amp;ndash;patient consultations were deterministically resampled to 55 configurations between 8 kHz and 32 kHz and transcribed using a fixed cloud-based ASR system. Session-level Word Error Rate, execution latency, payload bandwidth, and high-frequency phonetic content were analyzed using a composite sibilant-likelihood score. WER decreased from 0.338 at 8 kHz to a local minimum of 0.232 at 18.75 kHz, with gains plateauing beyond this range despite substantial bandwidth increases. Elbow-point, Pareto frontier, weighted scoring, and Minimum Acceptable Trade-off analyses converged on an optimal region between 17.25 and 18.75 kHz, demonstrating that sampling rate optimization improves ASR accuracy without proportional resource costs in telehealth settings.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 44: Investigating Sibilant Fricative Representation in Bangla Telemedicine Speech: A Cost-Aware Sampling Rate Optimization Study</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/44">doi: 10.3390/signals7030044</a></p>
	<p>Authors:
		Prajat Paul
		Mohamed Mehfoud Bouh
		Manan Vinod Shah
		Forhad Hossain
		Ashir Ahmed
		</p>
	<p>Automatic speech recognition has advanced rapidly for high-resource languages, yet performance remains limited for low-resource languages such as Bangla, particularly in telehealth settings. Most systems rely on a standardized 16 kHz sampling rate, a design choice despite evidence that Bangla contains sibilant fricatives and other phonetic cues with substantial high-frequency energy that may be suppressed under bandwidth and latency constraints. This study evaluates audio sampling rate as a controllable signal-level parameter for Bangla telehealth ASR to identify an empirically grounded operating range balancing transcription accuracy, execution time, and network bandwidth. Twenty real-world Bangla doctor&amp;ndash;patient consultations were deterministically resampled to 55 configurations between 8 kHz and 32 kHz and transcribed using a fixed cloud-based ASR system. Session-level Word Error Rate, execution latency, payload bandwidth, and high-frequency phonetic content were analyzed using a composite sibilant-likelihood score. WER decreased from 0.338 at 8 kHz to a local minimum of 0.232 at 18.75 kHz, with gains plateauing beyond this range despite substantial bandwidth increases. Elbow-point, Pareto frontier, weighted scoring, and Minimum Acceptable Trade-off analyses converged on an optimal region between 17.25 and 18.75 kHz, demonstrating that sampling rate optimization improves ASR accuracy without proportional resource costs in telehealth settings.</p>
	]]></content:encoded>

	<dc:title>Investigating Sibilant Fricative Representation in Bangla Telemedicine Speech: A Cost-Aware Sampling Rate Optimization Study</dc:title>
			<dc:creator>Prajat Paul</dc:creator>
			<dc:creator>Mohamed Mehfoud Bouh</dc:creator>
			<dc:creator>Manan Vinod Shah</dc:creator>
			<dc:creator>Forhad Hossain</dc:creator>
			<dc:creator>Ashir Ahmed</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030044</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/signals7030044</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/43">

	<title>Signals, Vol. 7, Pages 43: Dual-Mode Control in a Single-Cavity SIW Bandpass Filter for High-Q 5.8 GHz WiMAX Using Combined Magnetic&amp;ndash;Electric Perturbation</title>
	<link>https://www.mdpi.com/2624-6120/7/3/43</link>
	<description>This paper presents a compact, single-layer substrate-integrated waveguide (SIW) bandpass filter for 5.8 GHz WiMAX applications. The filter achieves an improved performance trade-off through a novel hybrid design strategy that combines central vertical perturbation vias with symmetrically etched complementary split-ring resonators (CSRRs). This configuration implements a hybrid magnetic&amp;ndash;electric perturbation within a single cavity, enabling simultaneous control of electric and magnetic field confinement. The proposed topology achieves an optimized balance among unloaded quality factor Qu, insertion loss, selectivity, and structural simplicity. Through targeted intra-cavity field manipulation, the filter attains a Qu of 239.7, a narrow fractional bandwidth of 3.08% (5.75&amp;ndash;5.93 GHz), and a low insertion loss of 1.12 dB. It also delivers enhanced selectivity compared to conventional single-cavity designs and performs competitively with multi-resonator architectures. An equivalent circuit model accurately captures the via&amp;ndash;CSRR interaction and agrees closely with full-wave electromagnetic simulations. Experimental results confirm excellent return loss and robust performance across the entire WiMAX band (5.725&amp;ndash;5.850 GHz). Thus, the proposed filter offers a practical, high-performance, and manufacturable solution for selective RF front-end applications.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 43: Dual-Mode Control in a Single-Cavity SIW Bandpass Filter for High-Q 5.8 GHz WiMAX Using Combined Magnetic&amp;ndash;Electric Perturbation</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/43">doi: 10.3390/signals7030043</a></p>
	<p>Authors:
		Sirine Aouine Chaieb
		Mahdi Abdelkarim
		Majdi Bahrouni
		Ali Gharsallah
		</p>
	<p>This paper presents a compact, single-layer substrate-integrated waveguide (SIW) bandpass filter for 5.8 GHz WiMAX applications. The filter achieves an improved performance trade-off through a novel hybrid design strategy that combines central vertical perturbation vias with symmetrically etched complementary split-ring resonators (CSRRs). This configuration implements a hybrid magnetic&amp;ndash;electric perturbation within a single cavity, enabling simultaneous control of electric and magnetic field confinement. The proposed topology achieves an optimized balance among unloaded quality factor Qu, insertion loss, selectivity, and structural simplicity. Through targeted intra-cavity field manipulation, the filter attains a Qu of 239.7, a narrow fractional bandwidth of 3.08% (5.75&amp;ndash;5.93 GHz), and a low insertion loss of 1.12 dB. It also delivers enhanced selectivity compared to conventional single-cavity designs and performs competitively with multi-resonator architectures. An equivalent circuit model accurately captures the via&amp;ndash;CSRR interaction and agrees closely with full-wave electromagnetic simulations. Experimental results confirm excellent return loss and robust performance across the entire WiMAX band (5.725&amp;ndash;5.850 GHz). Thus, the proposed filter offers a practical, high-performance, and manufacturable solution for selective RF front-end applications.</p>
	]]></content:encoded>

	<dc:title>Dual-Mode Control in a Single-Cavity SIW Bandpass Filter for High-Q 5.8 GHz WiMAX Using Combined Magnetic&amp;ndash;Electric Perturbation</dc:title>
			<dc:creator>Sirine Aouine Chaieb</dc:creator>
			<dc:creator>Mahdi Abdelkarim</dc:creator>
			<dc:creator>Majdi Bahrouni</dc:creator>
			<dc:creator>Ali Gharsallah</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030043</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/signals7030043</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/42">

	<title>Signals, Vol. 7, Pages 42: Frame-Level Audio Forgery Localization Using Handcrafted and Neural Features</title>
	<link>https://www.mdpi.com/2624-6120/7/3/42</link>
	<description>Audio forgery has emerged as a significant security and forensic challenge, driven by rapid advances in generative artificial intelligence and the widespread availability of audio editing tools, which enable the creation of highly realistic manipulated speech with minimal technical expertise. Existing approaches predominantly operate at the file level, providing only coarse binary decisions without identifying when or where manipulation occurs. This study addresses fine-grained temporal localization through a unified frame-level localization framework. We introduce a controlled forgery generation framework derived from the TIMIT speech corpus, applying atomic, localized manipulations under strict temporal constraints and producing precise frame-level annotations across diverse manipulation types. Building on this dataset, we then propose a transform-agnostic localization-driven detection approach using temporal inconsistency modeling, enabling unified analysis across heterogeneous manipulations at frame-level resolution. To analyze forensic evidence, we present an evidence-stratified modeling paradigm comparing three complementary strategies: a handcrafted anomaly-based method, a deep localization model leveraging pretrained wav2vec 2.0 representations, and a hybrid approach combining both through confidence-aware fusion and temporal consistency reinforcement. A systematic experimental analysis evaluates the effects of representation adaptation, hybrid fusion, and manipulation type on detection and localization performance. Results show that handcrafted features are insufficient for reliable frame-level localization, while task-adapted wav2vec 2.0 achieves strong and consistent performance. The hybrid approach does not consistently improve frame-level accuracy but yields substantial gains in segment-level localization by enforcing temporal coherence. 
Per-transform analysis confirms robust performance across most manipulations, with deletion-based operations remaining the most challenging.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 42: Frame-Level Audio Forgery Localization Using Handcrafted and Neural Features</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/42">doi: 10.3390/signals7030042</a></p>
	<p>Authors:
		Mostafa Moallim
		Taqwa A. Alhaj
		Fatin A. Elhaj
		Inshirah Idris
		Tasneem Darwish
		</p>
	<p>Audio forgery has emerged as a significant security and forensic challenge, driven by rapid advances in generative artificial intelligence and the widespread availability of audio editing tools, which enable the creation of highly realistic manipulated speech with minimal technical expertise. Existing approaches predominantly operate at the file level, providing only coarse binary decisions without identifying when or where manipulation occurs. This study addresses fine-grained temporal localization through a unified frame-level localization framework. We introduce a controlled forgery generation framework derived from the TIMIT speech corpus, applying atomic, localized manipulations under strict temporal constraints and producing precise frame-level annotations across diverse manipulation types. Building on this dataset, we then propose a transform-agnostic localization-driven detection approach using temporal inconsistency modeling, enabling unified analysis across heterogeneous manipulations at frame-level resolution. To analyze forensic evidence, we present an evidence-stratified modeling paradigm comparing three complementary strategies: a handcrafted anomaly-based method, a deep localization model leveraging pretrained wav2vec 2.0 representations, and a hybrid approach combining both through confidence-aware fusion and temporal consistency reinforcement. A systematic experimental analysis evaluates the effects of representation adaptation, hybrid fusion, and manipulation type on detection and localization performance. Results show that handcrafted features are insufficient for reliable frame-level localization, while task-adapted wav2vec 2.0 achieves strong and consistent performance. The hybrid approach does not consistently improve frame-level accuracy but yields substantial gains in segment-level localization by enforcing temporal coherence. 
Per-transform analysis confirms robust performance across most manipulations, with deletion-based operations remaining the most challenging.</p>
	]]></content:encoded>

	<dc:title>Frame-Level Audio Forgery Localization Using Handcrafted and Neural Features</dc:title>
			<dc:creator>Mostafa Moallim</dc:creator>
			<dc:creator>Taqwa A. Alhaj</dc:creator>
			<dc:creator>Fatin A. Elhaj</dc:creator>
			<dc:creator>Inshirah Idris</dc:creator>
			<dc:creator>Tasneem Darwish</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030042</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/signals7030042</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/41">

	<title>Signals, Vol. 7, Pages 41: Evaluating the Performance of eGeMAPS Features in Detecting Depression Using Resampling Methods</title>
	<link>https://www.mdpi.com/2624-6120/7/3/41</link>
	<description>This paper investigates how well eGeMAPS features can be used to classify depression from a patient’s speech audio samples through the use of statistical resampling methods. We use permutation tests to evaluate, with high confidence, whether eGeMAPS features and the speaker’s depression status are dependent. We use bootstrap confidence intervals to test, with high confidence, whether eGeMAPS features are able to better discriminate depression in male speakers than in female speakers. Lastly, we compare the detection power of different subsets of the eGeMAPS features. We use an open-source dataset of depressed and non-depressed speakers (E-DAIC), an open-source audio feature extractor (eGeMAPS), and open-source machine learning classifiers (WEKA) to enable replication of results and establish a baseline for future studies.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 41: Evaluating the Performance of eGeMAPS Features in Detecting Depression Using Resampling Methods</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/41">doi: 10.3390/signals7030041</a></p>
	<p>Authors:
		Joshua Turnipseed
		Benedito J. B. Fonseca
		</p>
	<p>This paper investigates how well eGeMAPS features can be used to classify depression from a patient’s speech audio samples through the use of statistical resampling methods. We use permutation tests to evaluate, with high confidence, whether eGeMAPS features and the speaker’s depression status are dependent. We use bootstrap confidence intervals to test, with high confidence, whether eGeMAPS features are able to better discriminate depression in male speakers than in female speakers. Lastly, we compare the detection power of different subsets of the eGeMAPS features. We use an open-source dataset of depressed and non-depressed speakers (E-DAIC), an open-source audio feature extractor (eGeMAPS), and open-source machine learning classifiers (WEKA) to enable replication of results and establish a baseline for future studies.</p>
	]]></content:encoded>

	<dc:title>Evaluating the Performance of eGeMAPS Features in Detecting Depression Using Resampling Methods</dc:title>
			<dc:creator>Joshua Turnipseed</dc:creator>
			<dc:creator>Benedito J. B. Fonseca</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030041</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/signals7030041</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/40">

	<title>Signals, Vol. 7, Pages 40: A Machine Learning-Augmented Microwave Sensor for Metallic Landmine Detection</title>
	<link>https://www.mdpi.com/2624-6120/7/3/40</link>
	<description>This paper presents a non-imaging landmine detection system that integrates a highly sensitive multiple-input multiple-output (MIMO) microwave sensor with a machine learning (ML) classifier for automated classification. The proposed sensor consists of two circular patch elements fed with two ports designed in a unique configuration, comprising a dual loop with a cross dipole, for enhancing sensitivity to changes in the environmental electrical properties (dielectric constant and electrical conductivity) induced by buried metallic objects. It operates in dual bands of 1.58 GHz and 1.75 GHz, within the operating frequency range of 1.3 to 2 GHz. The system’s performance was assessed using full-wave simulations and experimental measurements, involving a sand-filled foam container with a metal surrogate landmine placed at different depths. The sensor’s performance was evaluated by monitoring changes in the magnitude and phase of the reflection coefficient (S11) and the transmission coefficient (S21). The acquired scattering parameters data were processed using a Support Vector Machine (SVM) algorithm for automated classification. Results demonstrate the sensor’s high capability in detecting metallic targets at various depths and standoff distances. Compared to conventional imaging technologies, this system offers significant advantages in cost, simplicity, and ease of data processing. The SVM models trained on measurement data with proper feature selection showed a high level of agreement with their counterparts trained on simulation data. Stratified k-fold cross-validation was used to improve the reliability of accuracy metrics, with results showing 85% or higher mean accuracy in all classification scenarios.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 40: A Machine Learning-Augmented Microwave Sensor for Metallic Landmine Detection</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/40">doi: 10.3390/signals7030040</a></p>
	<p>Authors:
		Maged A. Aldhaeebi
		Abdulbaset Ali
		Thamer S. Almoneef
		</p>
	<p>This paper presents a non-imaging landmine detection system that integrates a highly sensitive multiple-input multiple-output (MIMO) microwave sensor with a machine learning (ML) classifier for automated classification. The proposed sensor consists of two circular patch elements fed with two ports designed in a unique configuration, comprising a dual loop with a cross dipole, for enhancing sensitivity to changes in the environmental electrical properties (dielectric constant and electrical conductivity) induced by buried metallic objects. It operates in dual bands of 1.58 GHz and 1.75 GHz, within the operating frequency range of 1.3 to 2 GHz. The system’s performance was assessed using full-wave simulations and experimental measurements, involving a sand-filled foam container with a metal surrogate landmine placed at different depths. The sensor’s performance was evaluated by monitoring changes in the magnitude and phase of the reflection coefficient (S11) and the transmission coefficient (S21). The acquired scattering parameters data were processed using a Support Vector Machine (SVM) algorithm for automated classification. Results demonstrate the sensor’s high capability in detecting metallic targets at various depths and standoff distances. Compared to conventional imaging technologies, this system offers significant advantages in cost, simplicity, and ease of data processing. The SVM models trained on measurement data with proper feature selection showed a high level of agreement with their counterparts trained on simulation data. Stratified k-fold cross-validation was used to improve the reliability of accuracy metrics, with results showing 85% or higher mean accuracy in all classification scenarios.</p>
	]]></content:encoded>

	<dc:title>A Machine Learning-Augmented Microwave Sensor for Metallic Landmine Detection</dc:title>
			<dc:creator>Maged A. Aldhaeebi</dc:creator>
			<dc:creator>Abdulbaset Ali</dc:creator>
			<dc:creator>Thamer S. Almoneef</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030040</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/signals7030040</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/3/39">

	<title>Signals, Vol. 7, Pages 39: Wavelet Basis Selection in Signal Denoising Based on Wavelet-Coefficient Distribution Shape</title>
	<link>https://www.mdpi.com/2624-6120/7/3/39</link>
	<description>Denoising one-dimensional signals by wavelet shrinkage critically depends on the choice of wavelet basis, yet basis selection is often guided by heuristics rather than explicit statistical criteria. This paper investigates the relationship between wavelet-basis properties and the shape of the probability density function (PDF) of the detail coefficients in the coarsest retained detail subband. On this basis, it proposes the shape of this PDF as a criterion for wavelet-basis selection. We hypothesize that, for a fixed decomposition depth, noise model, and shrinkage rule, a basis better matched to the signal’s local regularity produces a narrower and more sharply peaked coefficient PDF in this subband than a mismatched basis and can therefore serve as a data-driven indicator for basis selection. To evaluate the consistency of this proposal, we perform controlled hard-thresholding experiments on six canonical test signals, five wavelet bases, and additive white Gaussian noise. Although the test signals differ significantly in local regularity and features, the relationship between basis suitability and PDF shape is confirmed for each of them. Therefore, the results suggest that the proposed PDF-shape criterion is a valuable indicator for wavelet-basis selection.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 39: Wavelet Basis Selection in Signal Denoising Based on Wavelet-Coefficient Distribution Shape</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/3/39">doi: 10.3390/signals7030039</a></p>
	<p>Authors:
		Mladen Tomic
		Marko Gulic
		</p>
	<p>Denoising one-dimensional signals by wavelet shrinkage critically depends on the choice of wavelet basis, yet basis selection is often guided by heuristics rather than explicit statistical criteria. This paper investigates the relationship between wavelet-basis properties and the shape of the probability density function (PDF) of the detail coefficients in the coarsest retained detail subband. On this basis, it proposes the shape of this PDF as a criterion for wavelet-basis selection. We hypothesize that, for a fixed decomposition depth, noise model, and shrinkage rule, a basis better matched to the signal’s local regularity produces a narrower and more sharply peaked coefficient PDF in this subband than a mismatched basis and can therefore serve as a data-driven indicator for basis selection. To evaluate the consistency of this proposal, we perform controlled hard-thresholding experiments on six canonical test signals, five wavelet bases, and additive white Gaussian noise. Although the test signals differ significantly in local regularity and features, the relationship between basis suitability and PDF shape is confirmed for each of them. Therefore, the results suggest that the proposed PDF-shape criterion is a valuable indicator for wavelet-basis selection.</p>
	]]></content:encoded>

	<dc:title>Wavelet Basis Selection in Signal Denoising Based on Wavelet-Coefficient Distribution Shape</dc:title>
			<dc:creator>Mladen Tomic</dc:creator>
			<dc:creator>Marko Gulic</dc:creator>
		<dc:identifier>doi: 10.3390/signals7030039</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/signals7030039</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/3/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/38">

	<title>Signals, Vol. 7, Pages 38: Crypto-Agile FPGA Architecture with Single-Cycle Switching for OFDM-Based Vehicular Networks</title>
	<link>https://www.mdpi.com/2624-6120/7/2/38</link>
	<description>This paper presents a hardware-accelerated signal processing architecture for OFDM-based vehicular networks that integrates crypto-agile adaptive encryption on a Xilinx Kintex-7 FPGA. The encryption layer is tightly coupled to the OFDM modulation/demodulation pipeline, enabling secure real-time signal processing for V2X communications without disrupting the baseband chain. A context-aware pre-selection unit dynamically selects among hardware cipher primitives based on latency constraints, security requirements, and channel conditions. The current prototype implements and synthesizes AES-128 as the primary block cipher, while ASCON (NIST lightweight AEAD) and Keccak (SHA-3 foundation) are validated through RTL simulation and architectural integration, demonstrating crypto-agility across block, AEAD, and sponge-based primitives. DES is retained solely as a legacy reference for backward-compatibility evaluation and is not recommended for secure V2X deployment. The design adopts a modular decoupling strategy in which cryptographic engines interface with a unified buffering and interleaving subsystem, enabling hardware-based single-cycle cipher switching without partial reconfiguration. FPGA results demonstrate sub-microsecond cryptographic processing latencies with moderate resource utilization, preserving the timing budget of latency-sensitive vehicular services. AES-128 provides standard-strength encryption, while ASCON and Keccak offer lightweight and sponge-based alternatives suited to constrained IoV platforms. Specifically, the implemented AES-128 core achieves a throughput of 1.02 Gbps with a switching latency of 86 ns, verified across 10 randomized transitions with a 99.99% success rate and zero data corruption. The ASCON and Keccak cores attain throughput-to-area efficiencies of 2.01 and 1.47 Mbps/LUT, respectively, at a unified clock frequency of 50 MHz. 
All acronyms are defined at first use and a complete list of abbreviations is provided prior to the reference section.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 38: Crypto-Agile FPGA Architecture with Single-Cycle Switching for OFDM-Based Vehicular Networks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/38">doi: 10.3390/signals7020038</a></p>
	<p>Authors:
		Mahmoud Elomda
		Ahmed A. Ibrahim
		Mahmoud Abdelaziz
		</p>
	<p>This paper presents a hardware-accelerated signal processing architecture for OFDM-based vehicular networks that integrates crypto-agile adaptive encryption on a Xilinx Kintex-7 FPGA. The encryption layer is tightly coupled to the OFDM modulation/demodulation pipeline, enabling secure real-time signal processing for V2X communications without disrupting the baseband chain. A context-aware pre-selection unit dynamically selects among hardware cipher primitives based on latency constraints, security requirements, and channel conditions. The current prototype implements and synthesizes AES-128 as the primary block cipher, while ASCON (NIST lightweight AEAD) and Keccak (SHA-3 foundation) are validated through RTL simulation and architectural integration, demonstrating crypto-agility across block, AEAD, and sponge-based primitives. DES is retained solely as a legacy reference for backward-compatibility evaluation and is not recommended for secure V2X deployment. The design adopts a modular decoupling strategy in which cryptographic engines interface with a unified buffering and interleaving subsystem, enabling hardware-based single-cycle cipher switching without partial reconfiguration. FPGA results demonstrate sub-microsecond cryptographic processing latencies with moderate resource utilization, preserving the timing budget of latency-sensitive vehicular services. AES-128 provides standard-strength encryption, while ASCON and Keccak offer lightweight and sponge-based alternatives suited to constrained IoV platforms. Specifically, the implemented AES-128 core achieves a throughput of 1.02 Gbps with a switching latency of 86 ns, verified across 10 randomized transitions with a 99.99% success rate and zero data corruption. The ASCON and Keccak cores attain throughput-to-area efficiencies of 2.01 and 1.47 Mbps/LUT, respectively, at a unified clock frequency of 50 MHz. 
All acronyms are defined at first use and a complete list of abbreviations is provided prior to the reference section.</p>
	]]></content:encoded>

	<dc:title>Crypto-Agile FPGA Architecture with Single-Cycle Switching for OFDM-Based Vehicular Networks</dc:title>
			<dc:creator>Mahmoud Elomda</dc:creator>
			<dc:creator>Ahmed A. Ibrahim</dc:creator>
			<dc:creator>Mahmoud Abdelaziz</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020038</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/signals7020038</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/37">

	<title>Signals, Vol. 7, Pages 37: Circular Polarization-Based Quantum Encoding for Image Transmission over Error-Prone Channels</title>
	<link>https://www.mdpi.com/2624-6120/7/2/37</link>
	<description>Quantum image transmission over noisy communication channels remains a challenge due to the fragility of quantum states and their susceptibility to channel impairments. Existing quantum encoding schemes often exhibit limited noise resilience, while advanced approaches introduce computational and implementation complexity. To address these limitations, this paper proposes a circular polarization-based quantum encoding framework for image transmission over error-prone channels. In the proposed approach, source images are compressed and source-encoded using standard image coding formats, including the joint photographic experts group (JPEG) standard and the high-efficiency image file format (HEIF), and converted into classical bitstreams. The resulting bitstreams are protected using channel coding and mapped onto quantum states via circular polarization representations, where left- and right-hand circularly polarized states encode binary information. The encoded quantum states are transmitted over noisy quantum channels to model channel impairments. At the receiver, appropriate quantum decoding and channel decoding operations are applied to recover the classical bitstream, followed by source decoding to reconstruct the image. The performance of the proposed framework is evaluated using image quality metrics, including peak signal-to-noise ratio (PSNR), structural similarity index measure (SSIM), and universal quality index (UQI). Simulation results demonstrate that the proposed circular polarization-based encoding scheme outperforms existing quantum image encoding techniques, achieving channel SNR gains of 4 dB over state-of-the-art Hadamard-based encoding and 3 dB over frequency-domain quantum encoding methods under severe noise conditions. These results indicate that circular polarization-based quantum encoding provides improved noise robustness and reconstruction fidelity for practical quantum image transmission systems.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 37: Circular Polarization-Based Quantum Encoding for Image Transmission over Error-Prone Channels</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/37">doi: 10.3390/signals7020037</a></p>
	<p>Authors:
		Udara Jayasinghe
		Anil Fernando
		</p>
	<p>Quantum image transmission over noisy communication channels remains a challenge due to the fragility of quantum states and their susceptibility to channel impairments. Existing quantum encoding schemes often exhibit limited noise resilience, while advanced approaches introduce computational and implementation complexity. To address these limitations, this paper proposes a circular polarization-based quantum encoding framework for image transmission over error-prone channels. In the proposed approach, source images are compressed and source-encoded using standard image coding formats, including the joint photographic experts group (JPEG) standard and the high-efficiency image file format (HEIF), and converted into classical bitstreams. The resulting bitstreams are protected using channel coding and mapped onto quantum states via circular polarization representations, where left- and right-hand circularly polarized states encode binary information. The encoded quantum states are transmitted over noisy quantum channels to model channel impairments. At the receiver, appropriate quantum decoding and channel decoding operations are applied to recover the classical bitstream, followed by source decoding to reconstruct the image. The performance of the proposed framework is evaluated using image quality metrics, including peak signal-to-noise ratio (PSNR), structural similarity index measure (SSIM), and universal quality index (UQI). Simulation results demonstrate that the proposed circular polarization-based encoding scheme outperforms existing quantum image encoding techniques, achieving channel SNR gains of 4 dB over state-of-the-art Hadamard-based encoding and 3 dB over frequency-domain quantum encoding methods under severe noise conditions. These results indicate that circular polarization-based quantum encoding provides improved noise robustness and reconstruction fidelity for practical quantum image transmission systems.</p>
	]]></content:encoded>

	<dc:title>Circular Polarization-Based Quantum Encoding for Image Transmission over Error-Prone Channels</dc:title>
			<dc:creator>Udara Jayasinghe</dc:creator>
			<dc:creator>Anil Fernando</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020037</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/signals7020037</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/36">

	<title>Signals, Vol. 7, Pages 36: Selection of Number of IMFs and Order of Their AR Models for Feature Extraction in SVM-Based Bearing Diagnosis</title>
	<link>https://www.mdpi.com/2624-6120/7/2/36</link>
	<description>This study investigated the influence of hyperparameter selection within an EEMD–AR–SVM framework for bearing fault diagnosis under constant- and variable-speed operating conditions. Two preprocessing configurations, namely, Method 1, in which EEMD was applied after segmentation, and Method 2, in which EEMD preceded segmentation, were evaluated under three rotational regimes—constant speed, acceleration (Test A), and deceleration (Test B)—while number of Intrinsic Mode Functions (N), autoregressive model order (L), and segment length were systematically varied towards identifying combinations that maximized classification accuracy. The results showed the methods achieved 100% accuracy under constant-speed operation. However, Method 2 consistently outperformed Method 1 under nonstationary regimes, reaching 94.12% accuracy during acceleration and 95.00% during deceleration. The outer race remained the most challenging fault type, although its separability substantially improved when EEMD was performed prior to segmentation. The findings demonstrated, in a clear and interpretable manner, that the empirical choice of N and L directly affects classifier accuracy in stationary and nonstationary scenarios and the order of preprocessing steps plays a decisive role in diagnostic reliability. Such contributions provide a reproducible methodological basis for advancing vibration-based fault diagnosis and support the development of interpretable, high-performance predictive maintenance strategies for industrial environments.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 36: Selection of Number of IMFs and Order of Their AR Models for Feature Extraction in SVM-Based Bearing Diagnosis</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/36">doi: 10.3390/signals7020036</a></p>
	<p>Authors:
		Domingos Sávio Tavares Mendes Junior
		Rafael Suzuki Bayma
		Alexandre Luiz Amarante Mesquita
		</p>
	<p>This study investigated the influence of hyperparameter selection within an EEMD–AR–SVM framework for bearing fault diagnosis under constant- and variable-speed operating conditions. Two preprocessing configurations, namely, Method 1, in which EEMD was applied after segmentation, and Method 2, in which EEMD preceded segmentation, were evaluated under three rotational regimes—constant speed, acceleration (Test A), and deceleration (Test B)—while number of Intrinsic Mode Functions (N), autoregressive model order (L), and segment length were systematically varied towards identifying combinations that maximized classification accuracy. The results showed the methods achieved 100% accuracy under constant-speed operation. However, Method 2 consistently outperformed Method 1 under nonstationary regimes, reaching 94.12% accuracy during acceleration and 95.00% during deceleration. The outer race remained the most challenging fault type, although its separability substantially improved when EEMD was performed prior to segmentation. The findings demonstrated, in a clear and interpretable manner, that the empirical choice of N and L directly affects classifier accuracy in stationary and nonstationary scenarios and the order of preprocessing steps plays a decisive role in diagnostic reliability. Such contributions provide a reproducible methodological basis for advancing vibration-based fault diagnosis and support the development of interpretable, high-performance predictive maintenance strategies for industrial environments.</p>
	]]></content:encoded>

	<dc:title>Selection of Number of IMFs and Order of Their AR Models for Feature Extraction in SVM-Based Bearing Diagnosis</dc:title>
			<dc:creator>Domingos Sávio Tavares Mendes Junior</dc:creator>
			<dc:creator>Rafael Suzuki Bayma</dc:creator>
			<dc:creator>Alexandre Luiz Amarante Mesquita</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020036</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/signals7020036</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/35">

	<title>Signals, Vol. 7, Pages 35: An Active Deception Combined Jamming Identification Method Based on Waveform Modulation</title>
	<link>https://www.mdpi.com/2624-6120/7/2/35</link>
	<description>Jamming pattern identification is a crucial prerequisite for countering jamming. Combined jamming exhibits complex structures and diverse forms, making it difficult for traditional identification methods to extract suitable and stable features for effective discrimination. To address this challenge, this paper proposes a combined jamming identification method based on joint modulation of linear frequency modulation, phase coding and phase coding frequency modulation (LFM-PC-PCFM) waveforms. Building upon the time–frequency entropy features of combined interference, this method enhances the separability of jamming features in the radar-transmitted waveform dimension. The experiment employed the SVM classification algorithm based on particle swarm optimization for validation. Experiments demonstrate that the combined jamming recognition method under LFM-PC-PCFM waveform modulation achieves higher and more stable recognition accuracy than traditional LFM single-waveform modulation under jamming-to-noise ratios ranging from −10 dB to 30 dB.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 35: An Active Deception Combined Jamming Identification Method Based on Waveform Modulation</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/35">doi: 10.3390/signals7020035</a></p>
	<p>Authors:
		Yun Zhou
		Fulai Wang
		Nan Jiang
		Zhanling Wang
		Chen Pang
		Lei Zhang
		Yongzhen Li
		Ping Wang
		</p>
	<p>Jamming pattern identification is a crucial prerequisite for countering jamming. Combined jamming exhibits complex structures and diverse forms, making it difficult for traditional identification methods to extract suitable and stable features for effective discrimination. To address this challenge, this paper proposes a combined jamming identification method based on joint modulation of linear frequency modulation, phase coding and phase coding frequency modulation (LFM-PC-PCFM) waveforms. Building upon the time–frequency entropy features of combined interference, this method enhances the separability of jamming features in the radar-transmitted waveform dimension. The experiment employed the SVM classification algorithm based on particle swarm optimization for validation. Experiments demonstrate that the combined jamming recognition method under LFM-PC-PCFM waveform modulation achieves higher and more stable recognition accuracy than traditional LFM single-waveform modulation under jamming-to-noise ratios ranging from −10 dB to 30 dB.</p>
	]]></content:encoded>

	<dc:title>An Active Deception Combined Jamming Identification Method Based on Waveform Modulation</dc:title>
			<dc:creator>Yun Zhou</dc:creator>
			<dc:creator>Fulai Wang</dc:creator>
			<dc:creator>Nan Jiang</dc:creator>
			<dc:creator>Zhanling Wang</dc:creator>
			<dc:creator>Chen Pang</dc:creator>
			<dc:creator>Lei Zhang</dc:creator>
			<dc:creator>Yongzhen Li</dc:creator>
			<dc:creator>Ping Wang</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020035</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/signals7020035</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/34">

	<title>Signals, Vol. 7, Pages 34: Custom Deep Learning Framework for Interpreting Diabetic Retinopathy in Healthcare Diagnostics</title>
	<link>https://www.mdpi.com/2624-6120/7/2/34</link>
	<description>Diabetic retinopathy is a prevalent condition and a major public health concern due to its detrimental impact on eyesight. Diabetes is a root cause of its development and damages small blood vessels caused by prolonged high blood sugar levels. The degenerative consequences of diabetic retinopathy are irrevocable if not diagnosed in the early stages of its progression. This ailment triggers the development of retinal lesions, which can be identified for diagnosis and prognosis. However, lesion detection is challenging due to their similarity in intensity profiles to other retinal features, inconsistent sizes, and random locations. This research evaluates a custom deep learning network for classifying retinal images and compares it with the state-of-the-art classifiers. The novel preprocessing method is introduced to reduce the complexity of the diagnostic process and to enhance classification performance by adaptively enhancing images. Despite being a shallow network, the proposed model yields competitive results with an accuracy of 87.66% and an F1-score of 0.78. The evaluation metrics indicate that class imbalance affects the performance of the proposed model despite using the weighted cross-entropy loss. The future contribution will be the inclusion of generative adversarial networks for generating synthetic images to balance the dataset. This research aims to develop a robust computer-aided diagnostic system as a second interpreter for ophthalmologists during the diagnosis and prognosis stages.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 34: Custom Deep Learning Framework for Interpreting Diabetic Retinopathy in Healthcare Diagnostics</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/34">doi: 10.3390/signals7020034</a></p>
	<p>Authors:
		Tamoor Aziz
		Chalie Charoenlarpnopparut
		Srijidtra Mahapakulchai
		Babatunde Oluwaseun Ajayi
		Mayowa Emmanuel Bamisaye
		</p>
	<p>Diabetic retinopathy is a prevalent condition and a major public health concern due to its detrimental impact on eyesight. Diabetes is a root cause of its development and damages small blood vessels caused by prolonged high blood sugar levels. The degenerative consequences of diabetic retinopathy are irrevocable if not diagnosed in the early stages of its progression. This ailment triggers the development of retinal lesions, which can be identified for diagnosis and prognosis. However, lesion detection is challenging due to their similarity in intensity profiles to other retinal features, inconsistent sizes, and random locations. This research evaluates a custom deep learning network for classifying retinal images and compares it with the state-of-the-art classifiers. The novel preprocessing method is introduced to reduce the complexity of the diagnostic process and to enhance classification performance by adaptively enhancing images. Despite being a shallow network, the proposed model yields competitive results with an accuracy of 87.66% and an F1-score of 0.78. The evaluation metrics indicate that class imbalance affects the performance of the proposed model despite using the weighted cross-entropy loss. The future contribution will be the inclusion of generative adversarial networks for generating synthetic images to balance the dataset. This research aims to develop a robust computer-aided diagnostic system as a second interpreter for ophthalmologists during the diagnosis and prognosis stages.</p>
	]]></content:encoded>

	<dc:title>Custom Deep Learning Framework for Interpreting Diabetic Retinopathy in Healthcare Diagnostics</dc:title>
			<dc:creator>Tamoor Aziz</dc:creator>
			<dc:creator>Chalie Charoenlarpnopparut</dc:creator>
			<dc:creator>Srijidtra Mahapakulchai</dc:creator>
			<dc:creator>Babatunde Oluwaseun Ajayi</dc:creator>
			<dc:creator>Mayowa Emmanuel Bamisaye</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020034</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/signals7020034</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/33">

	<title>Signals, Vol. 7, Pages 33: Adjustable Complexity Transformer Architecture for Image Denoising</title>
	<link>https://www.mdpi.com/2624-6120/7/2/33</link>
	<description>In recent years, image denoising has seen a shift from traditional non-local self-similarity methods like BM3D to deep-learning based approaches that use learnable convolutions and attention mechanisms. While pixel-level attention is effective at capturing long-range relationships similar to non-local self-similarity based methods, it incurs extremely high computational costs that scale quadratically with image resolution. As an alternative, channel-wise attention is resolution-independent and computationally efficient but may miss crucial spatial details. In this paper, an adjustable attention mechanism is introduced that bridges the gap between pixel and channel attentions. In the proposed model, average pooling and variable-size convolutions are added before attention calculation to adjust spatial resolution and, thus, allow dynamical adjustment of computational complexity. This adjustable attention is applied in a transformer-based U-Net architecture and achieves performance comparable to state-of-the-art methods in both real and Gaussian blind denoising tasks. To be more concrete, the proposed method achieves a Peak Signal-to-Noise Ratio of 39.65 dB and a Structural Similarity Index Measure of 0.913 on the Smartphone Image Denoising Dataset. Therefore, the proposed method demonstrates a balance between efficiency and denoising quality.</description>
	<pubDate>2026-04-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 33: Adjustable Complexity Transformer Architecture for Image Denoising</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/33">doi: 10.3390/signals7020033</a></p>
	<p>Authors:
		Jan-Ray Liao
		Wen Lin
		Li-Wen Chang
		</p>
	<p>In recent years, image denoising has seen a shift from traditional non-local self-similarity methods like BM3D to deep-learning based approaches that use learnable convolutions and attention mechanisms. While pixel-level attention is effective at capturing long-range relationships similar to non-local self-similarity based methods, it incurs extremely high computational costs that scale quadratically with image resolution. As an alternative, channel-wise attention is resolution-independent and computationally efficient but may miss crucial spatial details. In this paper, an adjustable attention mechanism is introduced that bridges the gap between pixel and channel attentions. In the proposed model, average pooling and variable-size convolutions are added before attention calculation to adjust spatial resolution and, thus, allow dynamical adjustment of computational complexity. This adjustable attention is applied in a transformer-based U-Net architecture and achieves performance comparable to state-of-the-art methods in both real and Gaussian blind denoising tasks. To be more concrete, the proposed method achieves a Peak Signal-to-Noise Ratio of 39.65 dB and a Structural Similarity Index Measure of 0.913 on the Smartphone Image Denoising Dataset. Therefore, the proposed method demonstrates a balance between efficiency and denoising quality.</p>
	]]></content:encoded>

	<dc:title>Adjustable Complexity Transformer Architecture for Image Denoising</dc:title>
			<dc:creator>Jan-Ray Liao</dc:creator>
			<dc:creator>Wen Lin</dc:creator>
			<dc:creator>Li-Wen Chang</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020033</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-06</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-06</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/signals7020033</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/32">

	<title>Signals, Vol. 7, Pages 32: Universal Joint Maximum Likelihood Frame Synchronization and PLS Decoding for DVB-S2X Systems</title>
	<link>https://www.mdpi.com/2624-6120/7/2/32</link>
	<description>Compared to DVB-S2, DVB-S2X features a more intricate signaling structure. These signaling fields are employed not only in standard frames but are also frequently utilized within superframe structures. While rapid synchronization and decoding of these fields are critical, utilizing brute-force search methods incurs prohibitive computational costs. Therefore, this paper proposes a Joint Maximum Likelihood (JML) detection model tailored for the Fast Walsh–Hadamard Transform (FWHT). This approach allows for simultaneous synchronization and decoding while reducing the number of real addition operations per codeword by approximately 15 times compared to brute-force methods. Consequently, the proposed architecture provides a highly efficient solution applicable to DVB-S2X and backward compatible with DVB-S2.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 32: Universal Joint Maximum Likelihood Frame Synchronization and PLS Decoding for DVB-S2X Systems</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/32">doi: 10.3390/signals7020032</a></p>
	<p>Authors:
		Xin-Qi Liao
		Yih-Min Chen
		</p>
	<p>Compared to DVB-S2, DVB-S2X features a more intricate signaling structure. These signaling fields are employed not only in standard frames but are also frequently utilized within superframe structures. While rapid synchronization and decoding of these fields are critical, utilizing brute-force search methods incurs prohibitive computational costs. Therefore, this paper proposes a Joint Maximum Likelihood (JML) detection model tailored for the Fast Walsh–Hadamard Transform (FWHT). This approach allows for simultaneous synchronization and decoding while reducing the number of real addition operations per codeword by approximately 15 times compared to brute-force methods. Consequently, the proposed architecture provides a highly efficient solution applicable to DVB-S2X and backward compatible with DVB-S2.</p>
	]]></content:encoded>

	<dc:title>Universal Joint Maximum Likelihood Frame Synchronization and PLS Decoding for DVB-S2X Systems</dc:title>
			<dc:creator>Xin-Qi Liao</dc:creator>
			<dc:creator>Yih-Min Chen</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020032</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/signals7020032</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/31">

	<title>Signals, Vol. 7, Pages 31: Factors Affecting the Cushioning Performance of Granular Materials and the Application in AEM Signal Surveys</title>
	<link>https://www.mdpi.com/2624-6120/7/2/31</link>
	<description>Airborne electromagnetic (AEM) surveys map subsurface electrical structures by deploying transmitter and receiver coils on an airborne platform. However, platform-induced vibrations are transmitted to the sensors, generating strong motion-induced noise that severely degrades signal quality. To mitigate such noise, this study proposed the use of granular materials as a cushioning medium. An impact model based on the Discrete Element Method (DEM) was developed and validated against drop-weight experiments. Both granular material properties and impactor characteristics were investigated. The study examined the cushioning effects on both the base plate and the impactor under impact loading, and the sensitivity of key parameters was evaluated. The results showed that granular properties had minimal influence on the impactor peak force. Increasing particle Young’s modulus, density, or friction coefficient led to higher peak forces on the base plate, with Young’s modulus and density having significantly stronger effects than friction coefficient. Additionally, both the impactor size and velocity correlate positively with the peak forces transmitted to the base plate and experienced by the impactor. Under thin layer conditions, the impactor force was more sensitive to impact parameters, while in thick layers it was mainly determined by particle rearrangement and energy dissipation mechanisms. These findings reveal the mechanisms governing granular cushioning and provide a theoretical basis for vibration isolation design in AEM systems to preserve high-fidelity signals.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 31: Factors Affecting the Cushioning Performance of Granular Materials and the Application in AEM Signal Surveys</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/31">doi: 10.3390/signals7020031</a></p>
	<p>Authors:
		Lifang Fan
		Shaomin Liang
		Yanpeng Liu
		Guangbo Xiang
		Wei Zhang
		Xuexi Min
		</p>
	<p>Airborne electromagnetic (AEM) surveys map subsurface electrical structures by deploying transmitter and receiver coils on an airborne platform. However, platform-induced vibrations are transmitted to the sensors, generating strong motion-induced noise that severely degrades signal quality. To mitigate such noise, this study proposed the use of granular materials as a cushioning medium. An impact model based on the Discrete Element Method (DEM) was developed and validated against drop-weight experiments. Both granular material properties and impactor characteristics were investigated. The study examined the cushioning effects on both the base plate and the impactor under impact loading, and the sensitivity of key parameters was evaluated. The results showed that granular properties had minimal influence on the impactor peak force. Increasing particle Young’s modulus, density, or friction coefficient led to higher peak forces on the base plate, with Young’s modulus and density having significantly stronger effects than friction coefficient. Additionally, both the impactor size and velocity correlate positively with the peak forces transmitted to the base plate and experienced by the impactor. Under thin layer conditions, the impactor force was more sensitive to impact parameters, while in thick layers it was mainly determined by particle rearrangement and energy dissipation mechanisms. These findings reveal the mechanisms governing granular cushioning and provide a theoretical basis for vibration isolation design in AEM systems to preserve high-fidelity signals.</p>
	]]></content:encoded>

	<dc:title>Factors Affecting the Cushioning Performance of Granular Materials and the Application in AEM Signal Surveys</dc:title>
			<dc:creator>Lifang Fan</dc:creator>
			<dc:creator>Shaomin Liang</dc:creator>
			<dc:creator>Yanpeng Liu</dc:creator>
			<dc:creator>Guangbo Xiang</dc:creator>
			<dc:creator>Wei Zhang</dc:creator>
			<dc:creator>Xuexi Min</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020031</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/signals7020031</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/30">

	<title>Signals, Vol. 7, Pages 30: Active Vibration Control of a Servo-Driven Pneumatic Isolation Platform for Airborne Electromagnetic Detection Systems</title>
	<link>https://www.mdpi.com/2624-6120/7/2/30</link>
	<description>Airborne electromagnetic detection systems are highly susceptible to low-frequency motion-induced noise, which significantly degrades the extraction of weak geological signals. Conventional signal processing methods alone are often insufficient to suppress mechanically induced vibration noise, resulting in signal distortion and reduced detection reliability. To address this limitation, this study proposes an active noise suppression strategy that integrates mechanical vibration isolation with advanced signal processing. A pneumatic vibration isolation platform based on a cable-driven parallel robot (CDPR) architecture is developed to achieve precise orientation correction and effective vibration isolation. The system employs kinematic modeling and a servo-controlled pneumatic cylinder driven by a proportional directional valve to enable accurate dynamic regulation. Numerical simulations conducted in the Advanced Modeling and Simulation Environment (AMESim), combined with proportional–integral–derivative (PID) control, demonstrate that piston displacement overshoot is constrained within 0.2 mm. Furthermore, targeted filtering techniques are applied to enhance signal quality. Experimental results show that the response time for continuous step input is 0.18–0.2 s, with a steady-state error below 0.3 mm, confirming robust control performance. The proposed framework provides an effective low-noise solution for airborne electromagnetic detection and can improve survey reliability in deep resource exploration.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 30: Active Vibration Control of a Servo-Driven Pneumatic Isolation Platform for Airborne Electromagnetic Detection Systems</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/30">doi: 10.3390/signals7020030</a></p>
	<p>Authors:
		Ziqiang Zhu
		Haigen Zhou
		Ao Wei
		Junfeng Yuan
		Handong Tan
		Manping Yang
		Zuoxi Jiang
		Marco Alfano
		</p>
	<p>Airborne electromagnetic detection systems are highly susceptible to low-frequency motion-induced noise, which significantly degrades the extraction of weak geological signals. Conventional signal processing methods alone are often insufficient to suppress mechanically induced vibration noise, resulting in signal distortion and reduced detection reliability. To address this limitation, this study proposes an active noise suppression strategy that integrates mechanical vibration isolation with advanced signal processing. A pneumatic vibration isolation platform based on a cable-driven parallel robot (CDPR) architecture is developed to achieve precise orientation correction and effective vibration isolation. The system employs kinematic modeling and a servo-controlled pneumatic cylinder driven by a proportional directional valve to enable accurate dynamic regulation. Numerical simulations conducted in the Advanced Modeling and Simulation Environment (AMESim), combined with proportional–integral–derivative (PID) control, demonstrate that piston displacement overshoot is constrained within 0.2 mm. Furthermore, targeted filtering techniques are applied to enhance signal quality. Experimental results show that the response time for continuous step input is 0.18–0.2 s, with a steady-state error below 0.3 mm, confirming robust control performance. The proposed framework provides an effective low-noise solution for airborne electromagnetic detection and can improve survey reliability in deep resource exploration.</p>
	]]></content:encoded>

	<dc:title>Active Vibration Control of a Servo-Driven Pneumatic Isolation Platform for Airborne Electromagnetic Detection Systems</dc:title>
			<dc:creator>Ziqiang Zhu</dc:creator>
			<dc:creator>Haigen Zhou</dc:creator>
			<dc:creator>Ao Wei</dc:creator>
			<dc:creator>Junfeng Yuan</dc:creator>
			<dc:creator>Handong Tan</dc:creator>
			<dc:creator>Manping Yang</dc:creator>
			<dc:creator>Zuoxi Jiang</dc:creator>
			<dc:creator>Marco Alfano</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020030</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/signals7020030</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/29">

	<title>Signals, Vol. 7, Pages 29: Virial Extension for Discrete Data Series</title>
	<link>https://www.mdpi.com/2624-6120/7/2/29</link>
	<description>The Virial theorem has been applied with considerable success in various fields of natural sciences. This work proposes an extension of the theorem applied to discrete data series. This application will be called the Virial theorem extension and can be applied to the numerical solution of nonlinear dynamic systems represented by difference equations, such as logistic, discubic and random number generators, the numerical solution of differential equations like the nonlinear double pendulum and a series of pseudorandom numbers and its reciprocals. For this purpose, a coefficient was derived from the discrete Virial formalism. This coefficient can be used to detect when a time series is obtained as the solution of a differential equation, in which case the coefficient is close to 1, and when the data come from other sources, in which case it takes different values. With reference to chaotic dynamic systems, the discrete Virial coefficient shows the feasibility in the detection of a change in behavior, as an alternative to the traditional calculation of Lyapunov exponents, and it is a thousand times faster. The convergence speed of the final value of the discrete Virial coefficient of a dynamic system in a non-chaotic regime is between one and five orders of magnitude greater than in the chaotic regime, thus extending results in non-Hamiltonian systems, previously found by another author in Hamiltonian systems. The results obtained show that the proposal characterizes and distinguishes different types of behavior from the series under study. It also shows great sensitivity to the evolution of the series, even anticipating critical points. The proposed method to construct the discrete Virial extension does not require the existence of a Hamiltonian, which allows its application to a series obtained experimentally or from any differential equation. 
From a general point of view, this research shows a series of properties that can be reinterpreted in light of the discrete Virial coefficient, providing a novel and versatile tool, given its minimal applicability requirements. For pseudorandom number series, the extension reveals a consistent, quasi-mirror behavior between its kinetic and potential factors, suggesting an underlying structural property.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 29: Virial Extension for Discrete Data Series</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/29">doi: 10.3390/signals7020029</a></p>
	<p>Authors:
		Dino Otero
		Ariel Amadio
		Leandro Robles Dávila
		Marcos Maillot
		Cristian Bonini
		Walter Legnani
		</p>
	<p>The Virial theorem has been applied with considerable success in various fields of natural sciences. This work proposes an extension of the theorem applied to discrete data series. This application will be called the Virial theorem extension and can be applied to the numerical solution of nonlinear dynamic systems represented by difference equations, such as logistic, discubic and random number generators, the numerical solution of differential equations like the nonlinear double pendulum and a series of pseudorandom numbers and its reciprocals. For this purpose, a coefficient was derived from the discrete Virial formalism. This coefficient can be used to detect when a time series is obtained as the solution of a differential equation, in which case the coefficient is close to 1, and when the data come from other sources, in which case it takes different values. With reference to chaotic dynamic systems, the discrete Virial coefficient shows the feasibility in the detection of a change in behavior, as an alternative to the traditional calculation of Lyapunov exponents, and it is a thousand times faster. The convergence speed of the final value of the discrete Virial coefficient of a dynamic system in a non-chaotic regime is between one and five orders of magnitude greater than in the chaotic regime, thus extending results in non-Hamiltonian systems, previously found by another author in Hamiltonian systems. The results obtained show that the proposal characterizes and distinguishes different types of behavior from the series under study. It also shows great sensitivity to the evolution of the series, even anticipating critical points. The proposed method to construct the discrete Virial extension does not require the existence of a Hamiltonian, which allows its application to a series obtained experimentally or from any differential equation. 
From a general point of view, this research shows a series of properties that can be reinterpreted in light of the discrete Virial coefficient, providing a novel and versatile tool, given its minimal applicability requirements. For pseudorandom number series, the extension reveals a consistent, quasi-mirror behavior between its kinetic and potential factors, suggesting an underlying structural property.</p>
	]]></content:encoded>

	<dc:title>Virial Extension for Discrete Data Series</dc:title>
			<dc:creator>Dino Otero</dc:creator>
			<dc:creator>Ariel Amadio</dc:creator>
			<dc:creator>Leandro Robles Dávila</dc:creator>
			<dc:creator>Marcos Maillot</dc:creator>
			<dc:creator>Cristian Bonini</dc:creator>
			<dc:creator>Walter Legnani</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020029</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/signals7020029</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/28">

	<title>Signals, Vol. 7, Pages 28: Sample-Wise False-Positive Reduction in ECG P-, R-, and T-Peak Detection via Physiological Temporal Constraints and Lightweight Binary Classifiers</title>
	<link>https://www.mdpi.com/2624-6120/7/2/28</link>
	<description>Sample-wise detection of P-, R-, and T-peaks in electrocardiograms (ECGs) is challenging because each peak type is sparsely represented (≈1:500 samples in a typical 10-s, 500-Hz ECG at 60 bpm), such that even a small number of false-positives (FPs) can markedly degrade positive predictive value (PPV) and limit the practicality of classifier-only approaches. This study proposes a lightweight ECG peak detection framework that combines binary classifiers with physiological temporal constraints (PTC) to address extreme sample-level class imbalance. Local morphological features are first evaluated using lightweight machine-learning models, among which XGBoost (XGB) exhibited the most stable score-ranking performance. Rather than directly thresholding classifier outputs, prediction scores are interpreted within the framework, which encodes physiological timing relationships. R-peaks are detected using score ranking combined with a refractory-period constraint, and the detected R-peaks serve as temporal landmarks for subsequent P- and T-peak detection within physiologically plausible time windows reflecting the P–QRS–T sequence. Quantitative evaluation was conducted using the Lobachevsky University Electrocardiography Database, hereafter referred to as LUDB. With a temporal tolerance of ±20 ms, the XGB-based system achieved an F1-score of 0.87 for R-peak detection (sensitivity 0.96, PPV 0.79), corresponding to approximately 9–10 true R-peaks with only 2–3 FP samples per 10-s segment. For P- and T-peaks, F1-scores of 0.70 and 0.69 were obtained, respectively. Additional evaluation on arrhythmic LUDB records demonstrated robust R-peak detection across rhythm types. In AF-related rhythms, where organized P waves are physiologically absent, the framework appropriately suppressed P-peak detections, with false-positive rates remaining below 0.31%. 
Qualitative application to ECG recordings from the PTB-XL database further demonstrated physiologically consistent behavior. These results indicate that reliable and interpretable ECG peak detection under extreme class imbalance can be achieved by integrating lightweight classifiers within the proposed framework, without reliance on complex deep learning architectures.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 28: Sample-Wise False-Positive Reduction in ECG P-, R-, and T-Peak Detection via Physiological Temporal Constraints and Lightweight Binary Classifiers</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/28">doi: 10.3390/signals7020028</a></p>
	<p>Authors:
		Yutaka Yoshida
		Kiyoko Yokoyama
		</p>
	<p>Sample-wise detection of P-, R-, and T-peaks in electrocardiograms (ECGs) is challenging because each peak type is sparsely represented (≈1:500 samples in a typical 10-s, 500-Hz ECG at 60 bpm), such that even a small number of false-positives (FPs) can markedly degrade positive predictive value (PPV) and limit the practicality of classifier-only approaches. This study proposes a lightweight ECG peak detection framework that combines binary classifiers with physiological temporal constraints (PTC) to address extreme sample-level class imbalance. Local morphological features are first evaluated using lightweight machine-learning models, among which XGBoost (XGB) exhibited the most stable score-ranking performance. Rather than directly thresholding classifier outputs, prediction scores are interpreted within the framework, which encodes physiological timing relationships. R-peaks are detected using score ranking combined with a refractory-period constraint, and the detected R-peaks serve as temporal landmarks for subsequent P- and T-peak detection within physiologically plausible time windows reflecting the P–QRS–T sequence. Quantitative evaluation was conducted using the Lobachevsky University Electrocardiography Database, hereafter referred to as LUDB. With a temporal tolerance of ±20 ms, the XGB-based system achieved an F1-score of 0.87 for R-peak detection (sensitivity 0.96, PPV 0.79), corresponding to approximately 9–10 true R-peaks with only 2–3 FP samples per 10-s segment. For P- and T-peaks, F1-scores of 0.70 and 0.69 were obtained, respectively. Additional evaluation on arrhythmic LUDB records demonstrated robust R-peak detection across rhythm types. In AF-related rhythms, where organized P waves are physiologically absent, the framework appropriately suppressed P-peak detections, with false-positive rates remaining below 0.31%. 
Qualitative application to ECG recordings from the PTB-XL database further demonstrated physiologically consistent behavior. These results indicate that reliable and interpretable ECG peak detection under extreme class imbalance can be achieved by integrating lightweight classifiers within the proposed framework, without reliance on complex deep learning architectures.</p>
	]]></content:encoded>

	<dc:title>Sample-Wise False-Positive Reduction in ECG P-, R-, and T-Peak Detection via Physiological Temporal Constraints and Lightweight Binary Classifiers</dc:title>
			<dc:creator>Yutaka Yoshida</dc:creator>
			<dc:creator>Kiyoko Yokoyama</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020028</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/signals7020028</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/27">

	<title>Signals, Vol. 7, Pages 27: A Spectral Entropy-Based Metric for Evaluating Speech Perceptual Quality with Emphasis on Spectral Coherence</title>
	<link>https://www.mdpi.com/2624-6120/7/2/27</link>
	<description>Distortion of speech in real-life communication is inevitable, affecting its quality. Conventionally, the effectiveness of a speech system in terms of the perceptual quality of the speech it produces has been assessed using a time-consuming subjective metric, the mean opinion score. There are a number of objective metrics that can be used instead of the mean opinion score to assess the perceptual quality of the speech signal. The objective of this paper is to propose and validate a new objective metric, the spectral entropy-based metric (SEM), designed to evaluate the perceptual quality of speech and perceptual naturalness by quantifying spectral coherence. While other metrics focus on intelligibility, this study aims to fill a gap in naturalness assessment. The core novelty of this work lies in offering a diagnostic perspective on spectral coherence, an indicator of speech naturalness that is often not explicitly addressed by other metrics. To demonstrate the effectiveness of the proposed metric in evaluating the perceptual quality, we consider fixed-beam and steerable-beam first-order differential microphone arrays. Compared with other objective metrics, it is shown that the proposed SEM is more sensitive to spectral coherence, a predominant indicator of the naturalness of the output speech signal of a speech system.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 27: A Spectral Entropy-Based Metric for Evaluating Speech Perceptual Quality with Emphasis on Spectral Coherence</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/27">doi: 10.3390/signals7020027</a></p>
	<p>Authors:
		Ali Sarafnia
		M. Omair Ahmad
		M.N.S. Swamy
		</p>
	<p>Distortion of speech in real-life communication is inevitable, affecting its quality. Conventionally, the effectiveness of a speech system in terms of the perceptual quality of the speech it produces has been assessed using a time-consuming subjective metric, the mean opinion score. There are a number of objective metrics that can be used instead of the mean opinion score to assess the perceptual quality of the speech signal. The objective of this paper is to propose and validate a new objective metric, the spectral entropy-based metric (SEM), designed to evaluate the perceptual quality of speech and perceptual naturalness by quantifying spectral coherence. While other metrics focus on intelligibility, this study aims to fill a gap in naturalness assessment. The core novelty of this work lies in offering a diagnostic perspective on spectral coherence, an indicator of speech naturalness that is often not explicitly addressed by other metrics. To demonstrate the effectiveness of the proposed metric in evaluating the perceptual quality, we consider fixed-beam and steerable-beam first-order differential microphone arrays. Compared with other objective metrics, it is shown that the proposed SEM is more sensitive to spectral coherence, a predominant indicator of the naturalness of the output speech signal of a speech system.</p>
	]]></content:encoded>

	<dc:title>A Spectral Entropy-Based Metric for Evaluating Speech Perceptual Quality with Emphasis on Spectral Coherence</dc:title>
			<dc:creator>Ali Sarafnia</dc:creator>
			<dc:creator>M. Omair Ahmad</dc:creator>
			<dc:creator>M.N.S. Swamy</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020027</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/signals7020027</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/26">

	<title>Signals, Vol. 7, Pages 26: A Robust Fingerprint-Based Machine Learning Model for Indoor Navigation in Real Time</title>
	<link>https://www.mdpi.com/2624-6120/7/2/26</link>
	<description>The accurate positioning of location in indoor environment has become crucial in many location-based services, mainly where global positioning systems (GPSs) are unavailable or fail to navigate correctly. Conventional fingerprint-based approaches face challenges with instability, low accuracy, and being sensitive to changes in the environment. This study proposes a robust fingerprint-based machine learning (ML) model for dynamic environment indoor navigation in real time. The proposed model uses link quality indicator (LQI) values from IEEE 802.15.4 as fingerprints and supervised learning algorithms, showing high accuracy and a strong ability to adapt to changes in the environment. A room within a building floor has been regarded as the unit of location identification instead of the user&amp;amp;rsquo;s exact coordinates to make the suggested model more relevant under practical conditions. The model was trained and tested using a real LQI dataset collected from varied indoor conditions to ensure the system can adapt effectively and operate consistently in dynamic environments and signal conditions. The results show that the proposed model surpasses fingerprinting indoor navigation in room detection accuracy and flexibility to environmental changes. An implemented prototype proved the real-time capability of the proposal in smart buildings, hospitals, and industrial IoT settings.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 26: A Robust Fingerprint-Based Machine Learning Model for Indoor Navigation in Real Time</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/26">doi: 10.3390/signals7020026</a></p>
	<p>Authors:
		Md. Selim Al Mamun
		Fatema Akhter
		</p>
	<p>The accurate positioning of location in indoor environment has become crucial in many location-based services, mainly where global positioning systems (GPSs) are unavailable or fail to navigate correctly. Conventional fingerprint-based approaches face challenges with instability, low accuracy, and being sensitive to changes in the environment. This study proposes a robust fingerprint-based machine learning (ML) model for dynamic environment indoor navigation in real time. The proposed model uses link quality indicator (LQI) values from IEEE 802.15.4 as fingerprints and supervised learning algorithms, showing high accuracy and a strong ability to adapt to changes in the environment. A room within a building floor has been regarded as the unit of location identification instead of the user&amp;amp;rsquo;s exact coordinates to make the suggested model more relevant under practical conditions. The model was trained and tested using a real LQI dataset collected from varied indoor conditions to ensure the system can adapt effectively and operate consistently in dynamic environments and signal conditions. The results show that the proposed model surpasses fingerprinting indoor navigation in room detection accuracy and flexibility to environmental changes. An implemented prototype proved the real-time capability of the proposal in smart buildings, hospitals, and industrial IoT settings.</p>
	]]></content:encoded>

	<dc:title>A Robust Fingerprint-Based Machine Learning Model for Indoor Navigation in Real Time</dc:title>
			<dc:creator>Md. Selim Al Mamun</dc:creator>
			<dc:creator>Fatema Akhter</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020026</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/signals7020026</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/25">

	<title>Signals, Vol. 7, Pages 25: SpectraMelt: An Open-Source A2I Simulator</title>
	<link>https://www.mdpi.com/2624-6120/7/2/25</link>
	<description>The Nyquist Folding Receiver is an architecture that uses Compressed Sensing to convert analog radio frequency signals into digital signals. Analog-to-Digital Converter architectures that implement Compressed Sensing are collectively known as Analog-to-Information. Sparse bandlimited analog signals with frequency bands above the Nyquist frequency of a traditional Analog-to-Digital Converter can be recovered by Analog-to-Information receivers. Recovery of these signals is affected by the selection of a Compressed Sensing recovery algorithm. Typical recovery algorithms selected for recovery of Nyquist Folding Receiver-compressed outputs use iterative methods to find the solution. This work presents a machine learning approach to signal reconstruction. The proposed method uses a neural network to learn the mapping from compressed samples to the original signal. The neural network is trained on a set of synthetic signals generated by a new open-source Analog-to-Information simulator called SpectraMelt. The results show that the neural network can effectively reconstruct the original signal from the compressed samples, achieving better performance than traditional iterative methods.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 25: SpectraMelt: An Open-Source A2I Simulator</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/25">doi: 10.3390/signals7020025</a></p>
	<p>Authors:
		Peter Swartz
		Saiyu Ren
		Shuxia Sun
		James Martin
		</p>
	<p>The Nyquist Folding Receiver is an architecture that uses Compressed Sensing to convert analog radio frequency signals into digital signals. Analog-to-Digital Converter architectures that implement Compressed Sensing are collectively known as Analog-to-Information. Sparse bandlimited analog signals with frequency bands above the Nyquist frequency of a traditional Analog-to-Digital Converter can be recovered by Analog-to-Information receivers. Recovery of these signals is affected by the selection of a Compressed Sensing recovery algorithm. Typical recovery algorithms selected for recovery of Nyquist Folding Receiver-compressed outputs use iterative methods to find the solution. This work presents a machine learning approach to signal reconstruction. The proposed method uses a neural network to learn the mapping from compressed samples to the original signal. The neural network is trained on a set of synthetic signals generated by a new open-source Analog-to-Information simulator called SpectraMelt. The results show that the neural network can effectively reconstruct the original signal from the compressed samples, achieving better performance than traditional iterative methods.</p>
	]]></content:encoded>

	<dc:title>SpectraMelt: An Open-Source A2I Simulator</dc:title>
			<dc:creator>Peter Swartz</dc:creator>
			<dc:creator>Saiyu Ren</dc:creator>
			<dc:creator>Shuxia Sun</dc:creator>
			<dc:creator>James Martin</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020025</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/signals7020025</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/24">

	<title>Signals, Vol. 7, Pages 24: Non-Contact Heart Rate Estimation via Higher Harmonic Analysis Using 24-GHz Doppler Radar: Validation in Humans and Anesthetized Cat</title>
	<link>https://www.mdpi.com/2624-6120/7/2/24</link>
	<description>This study presents a harmonic-based method for non-contact heart rate (HR) estimation from continuous-wave (CW) Doppler radar signals, validated across multiple species including humans and small animals (cat). Traditional frequency-domain methods struggle when the HR fundamental frequency is weak or overlaps with respiratory components. The proposed approach addresses this by identifying three higher-order HR harmonics (2nd, 3rd, and 4th) then reconstructing the HR fundamental frequency from their integer ratios (3/2, 4/3, 2/1). The algorithm processes 20-s sliding windows (1-s overlap) using bandpass filtering to remove respiratory components and HR fundamental while preserving higher harmonics, followed by Power Spectral Density (PSD) analysis. When a complete harmonic set cannot be found, the proposed algorithm switches to harmonic pair detection, enhancing robustness when one harmonic is absent or attenuated. Besides, an adaptive tolerance mechanism enables detection under non-ideal conditions. The method was validated using a public human dataset and an experimental cat dataset with varied positions (supine/prone) and anesthesia levels (1&amp;amp;ndash;3% isoflurane). For humans, the algorithm achieved HR Accuracy consistently above 98% with an average RMSE of 1.33 bpm (MAPE: 1.29%, MAE: 0.86 bpm) and Bland-Altman bias below 0.9 bpm. For the cat dataset, performance was even better with HR Accuracy remaining above 99%, an average RMSE of 0.39 bpm (MAPE: 0.22%, MAE: 0.30 bpm), and bias below 0.14 bpm.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 24: Non-Contact Heart Rate Estimation via Higher Harmonic Analysis Using 24-GHz Doppler Radar: Validation in Humans and Anesthetized Cat</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/24">doi: 10.3390/signals7020024</a></p>
	<p>Authors:
		Huu-Son Nguyen
		Masaki Kurosawa
		Koichiro Ishibashi
		Ryou Tanaka
		Cong-Kha Pham
		Guanghao Sun
		</p>
	<p>This study presents a harmonic-based method for non-contact heart rate (HR) estimation from continuous-wave (CW) Doppler radar signals, validated across multiple species including humans and small animals (cat). Traditional frequency-domain methods struggle when the HR fundamental frequency is weak or overlaps with respiratory components. The proposed approach addresses this by identifying three higher-order HR harmonics (2nd, 3rd, and 4th) then reconstructing the HR fundamental frequency from their integer ratios (3/2, 4/3, 2/1). The algorithm processes 20-s sliding windows (1-s overlap) using bandpass filtering to remove respiratory components and HR fundamental while preserving higher harmonics, followed by Power Spectral Density (PSD) analysis. When a complete harmonic set cannot be found, the proposed algorithm switches to harmonic pair detection, enhancing robustness when one harmonic is absent or attenuated. Besides, an adaptive tolerance mechanism enables detection under non-ideal conditions. The method was validated using a public human dataset and an experimental cat dataset with varied positions (supine/prone) and anesthesia levels (1&amp;amp;ndash;3% isoflurane). For humans, the algorithm achieved HR Accuracy consistently above 98% with an average RMSE of 1.33 bpm (MAPE: 1.29%, MAE: 0.86 bpm) and Bland-Altman bias below 0.9 bpm. For the cat dataset, performance was even better with HR Accuracy remaining above 99%, an average RMSE of 0.39 bpm (MAPE: 0.22%, MAE: 0.30 bpm), and bias below 0.14 bpm.</p>
	]]></content:encoded>

	<dc:title>Non-Contact Heart Rate Estimation via Higher Harmonic Analysis Using 24-GHz Doppler Radar: Validation in Humans and Anesthetized Cat</dc:title>
			<dc:creator>Huu-Son Nguyen</dc:creator>
			<dc:creator>Masaki Kurosawa</dc:creator>
			<dc:creator>Koichiro Ishibashi</dc:creator>
			<dc:creator>Ryou Tanaka</dc:creator>
			<dc:creator>Cong-Kha Pham</dc:creator>
			<dc:creator>Guanghao Sun</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020024</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/signals7020024</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/23">

	<title>Signals, Vol. 7, Pages 23: Robust SNR Estimation Based on Time&amp;ndash;Frequency Analysis and Residual Blocks</title>
	<link>https://www.mdpi.com/2624-6120/7/2/23</link>
	<description>Signal-to-noise ratio (SNR) estimation plays a crucial role in communication systems, directly impacting the quality and reliability of signal transmission. This paper proposes a novel deep learning framework aimed at enhancing the accuracy and robustness of SNR estimation. The framework converts received signals into time&amp;amp;ndash;frequency matrices as feature inputs, effectively capturing both temporal and spectral characteristics through time&amp;amp;ndash;frequency analysis. Extensive experimental results across an SNR range of &amp;amp;minus;5 dB to 15 dB demonstrate that our method achieves a mean squared error (MSE) that closely approaches the theoretical Cram&amp;amp;eacute;r&amp;amp;ndash;Rao bound (CRB), comparable to data-aided (DA) maximum likelihood methods. A quantitative analysis reveals that, even under challenging conditions, such as a low SNR of &amp;amp;minus;5 dB, the model maintains superior accuracy with a mean absolute error (MAE) as low as 0.352, significantly outperforming traditional M2M4 and NDA estimators. The model&amp;amp;rsquo;s performance was systematically evaluated in a wide range of scenarios, encompassing various signal modulation formats, upsampling factors, multipath fading channels, frequency offsets, phase shifts, and roll-off factors. The evaluation highlights its exceptional generalization capability and robustness, with high performance and stability maintained even in challenging and dynamic environments.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 23: Robust SNR Estimation Based on Time&amp;ndash;Frequency Analysis and Residual Blocks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/23">doi: 10.3390/signals7020023</a></p>
	<p>Authors:
		Longqing Li
		Wenjun Xie
		Deming Hu
		Jingke Nie
		Fei Xie
		Zhiping Huang
		Yongjie Zhao
		</p>
	<p>Signal-to-noise ratio (SNR) estimation plays a crucial role in communication systems, directly impacting the quality and reliability of signal transmission. This paper proposes a novel deep learning framework aimed at enhancing the accuracy and robustness of SNR estimation. The framework converts received signals into time&amp;amp;ndash;frequency matrices as feature inputs, effectively capturing both temporal and spectral characteristics through time&amp;amp;ndash;frequency analysis. Extensive experimental results across an SNR range of &amp;amp;minus;5 dB to 15 dB demonstrate that our method achieves a mean squared error (MSE) that closely approaches the theoretical Cram&amp;amp;eacute;r&amp;amp;ndash;Rao bound (CRB), comparable to data-aided (DA) maximum likelihood methods. A quantitative analysis reveals that, even under challenging conditions, such as a low SNR of &amp;amp;minus;5 dB, the model maintains superior accuracy with a mean absolute error (MAE) as low as 0.352, significantly outperforming traditional M2M4 and NDA estimators. The model&amp;amp;rsquo;s performance was systematically evaluated in a wide range of scenarios, encompassing various signal modulation formats, upsampling factors, multipath fading channels, frequency offsets, phase shifts, and roll-off factors. The evaluation highlights its exceptional generalization capability and robustness, with high performance and stability maintained even in challenging and dynamic environments.</p>
	]]></content:encoded>

	<dc:title>Robust SNR Estimation Based on Time&amp;ndash;Frequency Analysis and Residual Blocks</dc:title>
			<dc:creator>Longqing Li</dc:creator>
			<dc:creator>Wenjun Xie</dc:creator>
			<dc:creator>Deming Hu</dc:creator>
			<dc:creator>Jingke Nie</dc:creator>
			<dc:creator>Fei Xie</dc:creator>
			<dc:creator>Zhiping Huang</dc:creator>
			<dc:creator>Yongjie Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020023</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/signals7020023</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/21">

	<title>Signals, Vol. 7, Pages 21: Agentic Vision Framework for Real-Time Manufacturing Contamination Detection Using Patch-Based Lightweight Convolutional Neural Networks</title>
	<link>https://www.mdpi.com/2624-6120/7/2/21</link>
	<description>Modern manufacturing quality control demands intelligent, adaptive inspection systems capable of real-time contamination detection with minimal computational overhead. We present a five-agent vision framework for material-aware contamination detection in manufacturing environments. The system comprises: a Material Classification Agent that identifies contamination type (fiber, sand, or mixed), three Material-Specific Detection Agents, each employing patch-based CNNs optimized for their respective material with dynamic patch size selection (128 px, 256 px, 384 px), and an Adaptation Agent that monitors performance and eliminates consistently failing patch size configurations. This hierarchical architecture enables intelligent routing to specialized detectors and continuous refinement through performance-driven adaptation. The Material Classification Agent achieves 98% accuracy in contamination type identification. Material-specific agents demonstrate F1-scores of 0.968 (fiber), 0.977 (sand), and 0.977 (mixed) with real-time inference (2.40&amp;amp;ndash;11.11 ms per 512 &amp;amp;times; 512 image). The Adaptation Agent implements selective patch size elimination: configurations failing quality thresholds (F1 &amp;amp;lt; 0.5) across multiple evaluation cycles are removed from the detection pipeline. On the synthetic test split used in this study, comparative evaluation against PatchCore, WinCLIP, and PaDiM shows 3&amp;amp;ndash;45&amp;amp;times; higher F1-scores with superior accuracy&amp;amp;ndash;latency trade-offs, validating the efficacy of specialized material-aware architectures for manufacturing contamination detection.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 21: Agentic Vision Framework for Real-Time Manufacturing Contamination Detection Using Patch-Based Lightweight Convolutional Neural Networks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/21">doi: 10.3390/signals7020021</a></p>
	<p>Authors:
		Yuan Xing
		Xuedong Ding
		Haowen Pan
		</p>
	<p>Modern manufacturing quality control demands intelligent, adaptive inspection systems capable of real-time contamination detection with minimal computational overhead. We present a five-agent vision framework for material-aware contamination detection in manufacturing environments. The system comprises: a Material Classification Agent that identifies contamination type (fiber, sand, or mixed), three Material-Specific Detection Agents, each employing patch-based CNNs optimized for their respective material with dynamic patch size selection (128 px, 256 px, 384 px), and an Adaptation Agent that monitors performance and eliminates consistently failing patch size configurations. This hierarchical architecture enables intelligent routing to specialized detectors and continuous refinement through performance-driven adaptation. The Material Classification Agent achieves 98% accuracy in contamination type identification. Material-specific agents demonstrate F1-scores of 0.968 (fiber), 0.977 (sand), and 0.977 (mixed) with real-time inference (2.40&amp;amp;ndash;11.11 ms per 512 &amp;amp;times; 512 image). The Adaptation Agent implements selective patch size elimination: configurations failing quality thresholds (F1 &amp;amp;lt; 0.5) across multiple evaluation cycles are removed from the detection pipeline. On the synthetic test split used in this study, comparative evaluation against PatchCore, WinCLIP, and PaDiM shows 3&amp;amp;ndash;45&amp;amp;times; higher F1-scores with superior accuracy&amp;amp;ndash;latency trade-offs, validating the efficacy of specialized material-aware architectures for manufacturing contamination detection.</p>
	]]></content:encoded>

	<dc:title>Agentic Vision Framework for Real-Time Manufacturing Contamination Detection Using Patch-Based Lightweight Convolutional Neural Networks</dc:title>
			<dc:creator>Yuan Xing</dc:creator>
			<dc:creator>Xuedong Ding</dc:creator>
			<dc:creator>Haowen Pan</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020021</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/signals7020021</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/22">

	<title>Signals, Vol. 7, Pages 22: Convolution and Sampling Theorems for Offset Fractional Fourier Transform</title>
	<link>https://www.mdpi.com/2624-6120/7/2/22</link>
	<description>The offset fractional Fourier transform (OFrFT) has emerged as a mathematical tool for time-frequency analysis of non-stationary signals. However, there has been relatively little research on investigating properties including the convolution theorem and the sampling formulas. This paper investigates the new form of convolution theorem and its product theorem associated with the OFrFT. We also establish a sampling formula related to the transformation. Finally, a simple example is displayed to verify the results related to the sampling formula for the OFrFT.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 22: Convolution and Sampling Theorems for Offset Fractional Fourier Transform</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/22">doi: 10.3390/signals7020022</a></p>
	<p>Authors:
		Mawardi Bahri
		Marni Rezki
		Samsul Ariffin Abdul Karim
		Nasrullah Bachtiar
		Muhammad Zakir
		Bannu Addul Samad
		</p>
	<p>The offset fractional Fourier transform (OFrFT) has emerged as a mathematical tool for time-frequency analysis of non-stationary signals. However, there has been relatively little research on investigating properties including the convolution theorem and the sampling formulas. This paper investigates the new form of convolution theorem and its product theorem associated with the OFrFT. We also establish a sampling formula related to the transformation. Finally, a simple example is displayed to verify the results related to the sampling formula for the OFrFT.</p>
	]]></content:encoded>

	<dc:title>Convolution and Sampling Theorems for Offset Fractional Fourier Transform</dc:title>
			<dc:creator>Mawardi Bahri</dc:creator>
			<dc:creator>Marni Rezki</dc:creator>
			<dc:creator>Samsul Ariffin Abdul Karim</dc:creator>
			<dc:creator>Nasrullah Bachtiar</dc:creator>
			<dc:creator>Muhammad Zakir</dc:creator>
			<dc:creator>Bannu Addul Samad</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020022</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/signals7020022</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/20">

	<title>Signals, Vol. 7, Pages 20: Proposal and Prototype of a GUI-Based Algorithm for ECG R-Peak Correction and Immediate R-R Interval Updating</title>
	<link>https://www.mdpi.com/2624-6120/7/2/20</link>
	<description>Electrocardiography (ECG) is a key biosensing technique for assessing cardiac function and autonomic activity. Accurate detection of R-peaks and precise calculation of R-R intervals (RRIs) are essential for heart rate variability (HRV) analysis; however, automated detection algorithms remain vulnerable to local misdetections, such as false positives or missed beats (false negatives), caused by noise, baseline fluctuations, or waveform variability. Conventional correction approaches based on filter or threshold adjustment may introduce new errors outside the target region, highlighting the need for an intuitive and localized manual correction capability. To address this issue, we developed a prototype graphical user interface (GUI)-based ECG viewer implemented in Fortran for high computational efficiency. The system enables interactive insertion and deletion of detected R-peaks, with recalculation of the RRI time series and automatic updating of related analyses, including power spectral density, histograms, Lorenz plots, and polar plots. Validation using synthetic ECG signals at four sampling frequencies (125&amp;amp;ndash;1000 Hz) and three display time scales (2, 5, and 10 s) demonstrated correction errors below 0.7% and stable update times within 20&amp;amp;ndash;30 ms. When applied to real ECG recordings from the MIT-BIH Arrhythmia Database (records 115, 122, and 209; MLII lead), the GUI-derived RRIs achieved accuracies exceeding 0.985 at a strict &amp;amp;plusmn;10 ms tolerance and reached 1.000 at &amp;amp;plusmn;20 ms or higher, including recordings with frequent atrial premature contractions. These results indicate that the proposed system provides reliable feedback for localized correction of R-peak misdetections without altering the underlying ECG signal. The proposed algorithm may support future research and experimental applications in biosignal processing.</description>
	<pubDate>2026-03-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 20: Proposal and Prototype of a GUI-Based Algorithm for ECG R-Peak Correction and Immediate R-R Interval Updating</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/20">doi: 10.3390/signals7020020</a></p>
	<p>Authors:
		Yutaka Yoshida
		Kiyoko Yokoyama
		</p>
	<p>Electrocardiography (ECG) is a key biosensing technique for assessing cardiac function and autonomic activity. Accurate detection of R-peaks and precise calculation of R-R intervals (RRIs) are essential for heart rate variability (HRV) analysis; however, automated detection algorithms remain vulnerable to local misdetections, such as false positives or missed beats (false negatives), caused by noise, baseline fluctuations, or waveform variability. Conventional correction approaches based on filter or threshold adjustment may introduce new errors outside the target region, highlighting the need for an intuitive and localized manual correction capability. To address this issue, we developed a prototype graphical user interface (GUI)-based ECG viewer implemented in Fortran for high computational efficiency. The system enables interactive insertion and deletion of detected R-peaks, with recalculation of the RRI time series and automatic updating of related analyses, including power spectral density, histograms, Lorenz plots, and polar plots. Validation using synthetic ECG signals at four sampling frequencies (125–1000 Hz) and three display time scales (2, 5, and 10 s) demonstrated correction errors below 0.7% and stable update times within 20–30 ms. When applied to real ECG recordings from the MIT-BIH Arrhythmia Database (records 115, 122, and 209; MLII lead), the GUI-derived RRIs achieved accuracies exceeding 0.985 at a strict ±10 ms tolerance and reached 1.000 at ±20 ms or higher, including recordings with frequent atrial premature contractions. These results indicate that the proposed system provides reliable feedback for localized correction of R-peak misdetections without altering the underlying ECG signal. The proposed algorithm may support future research and experimental applications in biosignal processing.</p>
	]]></content:encoded>

	<dc:title>Proposal and Prototype of a GUI-Based Algorithm for ECG R-Peak Correction and Immediate R-R Interval Updating</dc:title>
			<dc:creator>Yutaka Yoshida</dc:creator>
			<dc:creator>Kiyoko Yokoyama</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020020</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-03</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/signals7020020</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/2/19">

	<title>Signals, Vol. 7, Pages 19: Performance Evaluation of Displacement Estimation Methods for Early-Stage Breast Cancer Tumor Detection Using Strain Elastography</title>
	<link>https://www.mdpi.com/2624-6120/7/2/19</link>
	<description>Displacement estimation methods in strain elastography use ultrasound signals to estimate displacements and strain in soft tissue. Although several methods exist, systematic comparisons under controlled simulation conditions for lesions smaller than 5 mm are limited. Evaluating axial accuracy for superficial and intermediate depths in small breast cancer lesions is clinically important, as early-stage detection with existing techniques remains challenging. In this study, speckle tracking, the Doppler method, and the combined autocorrelation method (CAM) were used to estimate axial displacements from simulated elastography signals. The performance of these methods was assessed using the root mean square error (RMSE) for displacement field, strain, and tumor size estimation in a three-layer model comprising healthy tissue–tumor–healthy tissue. An extended analysis considering anatomically realistic tissue and motion artifacts conditions for the case of smallest lesion is presented. Finally, the CAM method, which obtained the best results, was assessed varying SNR/strain values. Simulation results show that CAM outperforms the other methods in displacement estimation across early-stage tumor sizes at both superficial and intermediate depths in all performed tests.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 19: Performance Evaluation of Displacement Estimation Methods for Early-Stage Breast Cancer Tumor Detection Using Strain Elastography</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/2/19">doi: 10.3390/signals7020019</a></p>
	<p>Authors:
		Alexey García Padilla
		Ivonne Bazán Trujillo
		Carlos A. Negreira Casares
		</p>
	<p>Displacement estimation methods in strain elastography use ultrasound signals to estimate displacements and strain in soft tissue. Although several methods exist, systematic comparisons under controlled simulation conditions for lesions smaller than 5 mm are limited. Evaluating axial accuracy for superficial and intermediate depths in small breast cancer lesions is clinically important, as early-stage detection with existing techniques remains challenging. In this study, speckle tracking, the Doppler method, and the combined autocorrelation method (CAM) were used to estimate axial displacements from simulated elastography signals. The performance of these methods was assessed using the root mean square error (RMSE) for displacement field, strain, and tumor size estimation in a three-layer model comprising healthy tissue–tumor–healthy tissue. An extended analysis considering anatomically realistic tissue and motion artifacts conditions for the case of smallest lesion is presented. Finally, the CAM method, which obtained the best results, was assessed varying SNR/strain values. Simulation results show that CAM outperforms the other methods in displacement estimation across early-stage tumor sizes at both superficial and intermediate depths in all performed tests.</p>
	]]></content:encoded>

	<dc:title>Performance Evaluation of Displacement Estimation Methods for Early-Stage Breast Cancer Tumor Detection Using Strain Elastography</dc:title>
			<dc:creator>Alexey García Padilla</dc:creator>
			<dc:creator>Ivonne Bazán Trujillo</dc:creator>
			<dc:creator>Carlos A. Negreira Casares</dc:creator>
		<dc:identifier>doi: 10.3390/signals7020019</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/signals7020019</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/18">

	<title>Signals, Vol. 7, Pages 18: ML-CDAE: Multi-Lead Convolutional Denoising Autoencoder for Denoising 12-Lead ECG Signals</title>
	<link>https://www.mdpi.com/2624-6120/7/1/18</link>
	<description>Background: Electrocardiography (ECG), particularly the 12-lead configuration, is a crucial method for identifying heart rhythm abnormalities. However, its effectiveness can be reduced by noise contamination. State-of-the-art denoising methods based on neural networks have demonstrated promising performance in denoising complex biosignals like ECG. However, most of these methods have focused on denoising single-lead ECG recordings. Methods: This research aims to leverage the inherent correlation among multi-lead ECG signals. Therefore, a multi-lead convolutional denoising autoencoder (ML-CDAE) model is proposed, to learn more effective representations, leading simultaneously to improved denoising performance and enhanced quality of 12-lead ECG recordings. Results: The findings indicate that ML-CDAE consistently outperforms a single-lead convolutional denoising autoencoder (SL-CDAE) and fully convolutional denoising autoencoder (FCN-DAE) model in denoising ECG signals corrupted by a mixture of physical noises. In particular, the mean squared error (MSE) and signal-to-noise ratio improvement (SNRimp) are used as evaluation metrics to assess the performance. Conclusions: The strong correlation among multi-lead ECG signals can be leveraged not only to enhance the denoising performance of the ML-CDAE model but also to simultaneously denoise 12-lead ECG signals more successfully compared to both the SL-CDAE and FCN-DAE models.</description>
	<pubDate>2026-02-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 18: ML-CDAE: Multi-Lead Convolutional Denoising Autoencoder for Denoising 12-Lead ECG Signals</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/18">doi: 10.3390/signals7010018</a></p>
	<p>Authors:
		Malaz Alfa
		Fars Samann
		Thomas Schanze
		</p>
	<p>Background: Electrocardiography (ECG), particularly the 12-lead configuration, is a crucial method for identifying heart rhythm abnormalities. However, its effectiveness can be reduced by noise contamination. State-of-the-art denoising methods based on neural networks have demonstrated promising performance in denoising complex biosignals like ECG. However, most of these methods have focused on denoising single-lead ECG recordings. Methods: This research aims to leverage the inherent correlation among multi-lead ECG signals. Therefore, a multi-lead convolutional denoising autoencoder (ML-CDAE) model is proposed, to learn more effective representations, leading simultaneously to improved denoising performance and enhanced quality of 12-lead ECG recordings. Results: The findings indicate that ML-CDAE consistently outperforms a single-lead convolutional denoising autoencoder (SL-CDAE) and fully convolutional denoising autoencoder (FCN-DAE) model in denoising ECG signals corrupted by a mixture of physical noises. In particular, the mean squared error (MSE) and signal-to-noise ratio improvement (SNRimp) are used as evaluation metrics to assess the performance. Conclusions: The strong correlation among multi-lead ECG signals can be leveraged not only to enhance the denoising performance of the ML-CDAE model but also to simultaneously denoise 12-lead ECG signals more successfully compared to both the SL-CDAE and FCN-DAE models.</p>
	]]></content:encoded>

	<dc:title>ML-CDAE: Multi-Lead Convolutional Denoising Autoencoder for Denoising 12-Lead ECG Signals</dc:title>
			<dc:creator>Malaz Alfa</dc:creator>
			<dc:creator>Fars Samann</dc:creator>
			<dc:creator>Thomas Schanze</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010018</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-19</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-19</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/signals7010018</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/17">

	<title>Signals, Vol. 7, Pages 17: From Neurons to Networks: A Holistic Review of Electroencephalography (EEG) from Neurophysiological Foundations to AI Techniques</title>
	<link>https://www.mdpi.com/2624-6120/7/1/17</link>
	<description>Electroencephalography (EEG) has transitioned from a subjective observational method into a data-intensive analytical field that utilises sophisticated algorithms and mathematical models. This review provides a holistic foundation by detailing the neurophysiological basis, recording techniques, and applications of EEG before providing a rigorous examination of traditional and modern analytical pillars. Statistical and Time-Series Analysis, Spectral and Time-Frequency Analysis, Spatial Analysis and Source Modelling, Connectivity and Network Analysis, and Nonlinear and Chaotic Analysis are explored. Afterwards, while acknowledging the historical role of Machine Learning (ML) and Deep Learning (DL) architectures, such as Support Vector Machines (SVMs) and Convolutional Neural Networks (CNNs), this review shifts the primary focus toward current state-of-the-art Artificial Intelligence (AI) trends. We place emphasis on the emergence of Foundation Models, including Large Language Models (LLMs) and Large Vision Models (LVMs), adapted for high-dimensional neural sequences. Finally, we explore the integration of Generative AI for data augmentation and review Explainable AI (XAI) frameworks designed to bridge the gap between “black-box” decoding and clinical interpretability. We conclude that the next generation of EEG analysis will likely converge into Neuro-Symbolic architectures, synergising the massive generative power of foundation models with the rigorous, rule-based interpretability of classical signal theory.</description>
	<pubDate>2026-02-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 17: From Neurons to Networks: A Holistic Review of Electroencephalography (EEG) from Neurophysiological Foundations to AI Techniques</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/17">doi: 10.3390/signals7010017</a></p>
	<p>Authors:
		Christos Kalogeropoulos
		Konstantinos Theofilatos
		Seferina Mavroudi
		</p>
	<p>Electroencephalography (EEG) has transitioned from a subjective observational method into a data-intensive analytical field that utilises sophisticated algorithms and mathematical models. This review provides a holistic foundation by detailing the neurophysiological basis, recording techniques, and applications of EEG before providing a rigorous examination of traditional and modern analytical pillars. Statistical and Time-Series Analysis, Spectral and Time-Frequency Analysis, Spatial Analysis and Source Modelling, Connectivity and Network Analysis, and Nonlinear and Chaotic Analysis are explored. Afterwards, while acknowledging the historical role of Machine Learning (ML) and Deep Learning (DL) architectures, such as Support Vector Machines (SVMs) and Convolutional Neural Networks (CNNs), this review shifts the primary focus toward current state-of-the-art Artificial Intelligence (AI) trends. We place emphasis on the emergence of Foundation Models, including Large Language Models (LLMs) and Large Vision Models (LVMs), adapted for high-dimensional neural sequences. Finally, we explore the integration of Generative AI for data augmentation and review Explainable AI (XAI) frameworks designed to bridge the gap between “black-box” decoding and clinical interpretability. We conclude that the next generation of EEG analysis will likely converge into Neuro-Symbolic architectures, synergising the massive generative power of foundation models with the rigorous, rule-based interpretability of classical signal theory.</p>
	]]></content:encoded>

	<dc:title>From Neurons to Networks: A Holistic Review of Electroencephalography (EEG) from Neurophysiological Foundations to AI Techniques</dc:title>
			<dc:creator>Christos Kalogeropoulos</dc:creator>
			<dc:creator>Konstantinos Theofilatos</dc:creator>
			<dc:creator>Seferina Mavroudi</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010017</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/signals7010017</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/16">

	<title>Signals, Vol. 7, Pages 16: An Interpretable Residual Spatio-Temporal Graph Attention Network for Multiclass Emotion Recognition from EEG</title>
	<link>https://www.mdpi.com/2624-6120/7/1/16</link>
	<description>Automatic emotion recognition based on EEG has been a key research frontier in recent years, involving the direct extraction of emotional states from brain dynamics. However, existing deep learning approaches often treat EEG either as a sequence or as a static spatial map, thereby failing to jointly capture the temporal evolution and spatial dependencies underlying emotional responses. To address this limitation, we propose an Interpretable Residual Spatio-Temporal Graph Attention Network (IRSTGANet) that integrates temporal convolutional encoding with residual graph-attention blocks. The temporal module enhances short-term EEG dynamics, while the graph-attention layers learn adaptive node connectivity relationships and preserve contextual information through residual links. Evaluated on the DEAP and SEED datasets, the proposed model achieved exceptional performance on valence and arousal, as well as four-class and nine-class classification on the DEAP dataset and on the three-class SEED dataset, exceeding state-of-the-art methods. These results demonstrate that combining temporal enhancement with residual graph attention yields both improved recognition performance and interpretable insights into emotion-related neural connectivity.</description>
	<pubDate>2026-02-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 16: An Interpretable Residual Spatio-Temporal Graph Attention Network for Multiclass Emotion Recognition from EEG</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/16">doi: 10.3390/signals7010016</a></p>
	<p>Authors:
		Manal Hilali
		Abdellah Ezzati
		Said Ben Alla
		Ahmed El Badaoui
		</p>
	<p>Automatic emotion recognition based on EEG has been a key research frontier in recent years, involving the direct extraction of emotional states from brain dynamics. However, existing deep learning approaches often treat EEG either as a sequence or as a static spatial map, thereby failing to jointly capture the temporal evolution and spatial dependencies underlying emotional responses. To address this limitation, we propose an Interpretable Residual Spatio-Temporal Graph Attention Network (IRSTGANet) that integrates temporal convolutional encoding with residual graph-attention blocks. The temporal module enhances short-term EEG dynamics, while the graph-attention layers learn adaptive node connectivity relationships and preserve contextual information through residual links. Evaluated on the DEAP and SEED datasets, the proposed model achieved exceptional performance on valence and arousal, as well as four-class and nine-class classification on the DEAP dataset and on the three-class SEED dataset, exceeding state-of-the-art methods. These results demonstrate that combining temporal enhancement with residual graph attention yields both improved recognition performance and interpretable insights into emotion-related neural connectivity.</p>
	]]></content:encoded>

	<dc:title>An Interpretable Residual Spatio-Temporal Graph Attention Network for Multiclass Emotion Recognition from EEG</dc:title>
			<dc:creator>Manal Hilali</dc:creator>
			<dc:creator>Abdellah Ezzati</dc:creator>
			<dc:creator>Said Ben Alla</dc:creator>
			<dc:creator>Ahmed El Badaoui</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010016</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-05</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-05</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/signals7010016</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/15">

	<title>Signals, Vol. 7, Pages 15: Predicting 1-Year Mortality in Patients with Non-ST Elevation Myocardial Infarction (NSTEMI) Using Survival Models and Aortic Pressure Signals Recorded During Cardiac Catheterization</title>
	<link>https://www.mdpi.com/2624-6120/7/1/15</link>
	<description>Despite successful revascularization, patients with non-ST elevation myocardial infarction (NSTEMI) remain at higher risk of mortality and morbidity. Accurately predicting mortality risk in this cohort can improve outcomes through timely interventions. This study for the first time predicts 1-year all-cause mortality in an NSTEMI cohort using features extracted primarily from the aortic pressure (AP) signal recorded during cardiac catheterization. We analyzed data from 497 NSTEMI patients (66.3 ± 12.9 years, 187 (37.6%) females) retrospectively. We developed three survival models, the multivariate Cox proportional hazards, DeepSurv, and random survival forest, to predict mortality. Then, used Shapley additive explanations (SHAP) to interpret the decision-making process of the best survival model. Using 5-fold stratified cross-validation, DeepSurv achieved an average C-index of 0.935, an IBS of 0.028, and a mean time-dependent AUC of 0.939, outperforming the other models. Ejection systolic time, ejection systolic period, the difference between systolic blood pressure and dicrotic notch pressure (DesP), skewness, the age-modified shock index, and myocardial oxygen supply/demand ratio were identified by SHAP as the most characteristic AP features. In conclusion, AP signal features offer valuable prognostic insight for predicting 1-year all-cause mortality in the NSTEMI population, leading to enhanced risk stratification and clinical decision-making.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 15: Predicting 1-Year Mortality in Patients with Non-ST Elevation Myocardial Infarction (NSTEMI) Using Survival Models and Aortic Pressure Signals Recorded During Cardiac Catheterization</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/15">doi: 10.3390/signals7010015</a></p>
	<p>Authors:
		Seyed Reza Razavi
		Ashish H. Shah
		Zahra Moussavi
		</p>
	<p>Despite successful revascularization, patients with non-ST elevation myocardial infarction (NSTEMI) remain at higher risk of mortality and morbidity. Accurately predicting mortality risk in this cohort can improve outcomes through timely interventions. This study for the first time predicts 1-year all-cause mortality in an NSTEMI cohort using features extracted primarily from the aortic pressure (AP) signal recorded during cardiac catheterization. We analyzed data from 497 NSTEMI patients (66.3 ± 12.9 years, 187 (37.6%) females) retrospectively. We developed three survival models, the multivariate Cox proportional hazards, DeepSurv, and random survival forest, to predict mortality. Then, used Shapley additive explanations (SHAP) to interpret the decision-making process of the best survival model. Using 5-fold stratified cross-validation, DeepSurv achieved an average C-index of 0.935, an IBS of 0.028, and a mean time-dependent AUC of 0.939, outperforming the other models. Ejection systolic time, ejection systolic period, the difference between systolic blood pressure and dicrotic notch pressure (DesP), skewness, the age-modified shock index, and myocardial oxygen supply/demand ratio were identified by SHAP as the most characteristic AP features. In conclusion, AP signal features offer valuable prognostic insight for predicting 1-year all-cause mortality in the NSTEMI population, leading to enhanced risk stratification and clinical decision-making.</p>
	]]></content:encoded>

	<dc:title>Predicting 1-Year Mortality in Patients with Non-ST Elevation Myocardial Infarction (NSTEMI) Using Survival Models and Aortic Pressure Signals Recorded During Cardiac Catheterization</dc:title>
			<dc:creator>Seyed Reza Razavi</dc:creator>
			<dc:creator>Ashish H. Shah</dc:creator>
			<dc:creator>Zahra Moussavi</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010015</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/signals7010015</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/14">

	<title>Signals, Vol. 7, Pages 14: Comparative Study of Supervised Deep Learning Architectures for Background Subtraction and Motion Segmentation on CDnet2014</title>
	<link>https://www.mdpi.com/2624-6120/7/1/14</link>
	<description>Foreground segmentation and background subtraction are critical components in many computer vision applications, such as intelligent video surveillance, urban security systems, and obstacle detection for autonomous vehicles. Although extensively studied over the past decades, these tasks remain challenging, particularly due to rapid illumination changes, dynamic backgrounds, cast shadows, and camera movements. The emergence of supervised deep learning-based methods has significantly enhanced performance, surpassing traditional approaches on the benchmark dataset CDnet2014. In this context, this paper provides a comprehensive review of recent supervised deep learning techniques applied to background subtraction, along with an in-depth comparative analysis of state-of-the-art approaches available on the official CDnet2014 results platform. Specifically, we examine several key architecture families, including convolutional neural networks (CNN and FCN), encoder–decoder models such as FgSegNet and Motion U-Net, adversarial frameworks (GAN), Transformer-based architectures, and hybrid methods combining intermittent semantic segmentation with rapid detection algorithms such as RT-SBS-v2. Beyond summarizing existing works, this review contributes a structured cross-family comparison under a unified benchmark, a focused analysis of performance behavior across challenging CDnet2014 scenarios, and a critical discussion of the trade-offs between segmentation accuracy, robustness, and computational efficiency for practical deployment.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 14: Comparative Study of Supervised Deep Learning Architectures for Background Subtraction and Motion Segmentation on CDnet2014</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/14">doi: 10.3390/signals7010014</a></p>
	<p>Authors:
		Oussama Boufares
		Wajdi Saadaoui
		Mohamed Boussif
		</p>
	<p>Foreground segmentation and background subtraction are critical components in many computer vision applications, such as intelligent video surveillance, urban security systems, and obstacle detection for autonomous vehicles. Although extensively studied over the past decades, these tasks remain challenging, particularly due to rapid illumination changes, dynamic backgrounds, cast shadows, and camera movements. The emergence of supervised deep learning-based methods has significantly enhanced performance, surpassing traditional approaches on the benchmark dataset CDnet2014. In this context, this paper provides a comprehensive review of recent supervised deep learning techniques applied to background subtraction, along with an in-depth comparative analysis of state-of-the-art approaches available on the official CDnet2014 results platform. Specifically, we examine several key architecture families, including convolutional neural networks (CNN and FCN), encoder–decoder models such as FgSegNet and Motion U-Net, adversarial frameworks (GAN), Transformer-based architectures, and hybrid methods combining intermittent semantic segmentation with rapid detection algorithms such as RT-SBS-v2. Beyond summarizing existing works, this review contributes a structured cross-family comparison under a unified benchmark, a focused analysis of performance behavior across challenging CDnet2014 scenarios, and a critical discussion of the trade-offs between segmentation accuracy, robustness, and computational efficiency for practical deployment.</p>
	]]></content:encoded>

	<dc:title>Comparative Study of Supervised Deep Learning Architectures for Background Subtraction and Motion Segmentation on CDnet2014</dc:title>
			<dc:creator>Oussama Boufares</dc:creator>
			<dc:creator>Wajdi Saadaoui</dc:creator>
			<dc:creator>Mohamed Boussif</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010014</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/signals7010014</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/13">

	<title>Signals, Vol. 7, Pages 13: Adaptive ORB Accelerator on FPGA: High Throughput, Power Consumption, and More Efficient Vision for UAVs</title>
	<link>https://www.mdpi.com/2624-6120/7/1/13</link>
	<description>Feature extraction and description are fundamental components of visual perception systems used in applications such as visual odometry, Simultaneous Localization and Mapping (SLAM), and autonomous navigation. In resource-constrained platforms, such as Unmanned Aerial Vehicles (UAVs), achieving real-time hardware acceleration on Field-Programmable Gate Arrays (FPGAs) is challenging. This work demonstrates an FPGA-based implementation of an adaptive ORB (Oriented FAST and Rotated BRIEF) feature extraction pipeline designed for high-throughput and energy-efficient embedded vision. The proposed architecture is a completely new design for the main algorithmic blocks of ORB, including the FAST (Features from Accelerated Segment Test) feature detector, Gaussian image filtering, moment computation, and descriptor generation. Adaptive mechanisms are introduced to dynamically adjust thresholds and filtering behavior, improving robustness under varying illumination conditions. The design is developed using a High-Level Synthesis (HLS) approach, where all processing modules are implemented as reusable hardware IP cores and integrated at the system level. The architecture is deployed and evaluated on two FPGA platforms, PYNQ-Z2 and KRIA KR260, and its performance is compared against CPU and GPU implementations using a dedicated C++ testbench based on OpenCV. Experimental results demonstrate significant improvements in throughput and energy efficiency while maintaining stable and scalable performance, making the proposed solution suitable for real-time embedded vision applications on UAVs and similar platforms. 
Notably, the FPGA implementation increases DSP utilization from 11% to 29% compared to the previous designs implemented by other researchers, effectively offloading computational tasks from general purpose logic (LUTs and FFs), reducing LUT usage by 6% and FF usage by 13%, while maintaining overall design stability, scalability, and acceptable thermal margins at 2.387 W. This work establishes a robust foundation for integrating the optimized ORB pipeline into larger drone systems and opens the door for future system-level enhancements.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 13: Adaptive ORB Accelerator on FPGA: High Throughput, Power Consumption, and More Efficient Vision for UAVs</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/13">doi: 10.3390/signals7010013</a></p>
	<p>Authors:
		Hussam Rostum
		József Vásárhelyi
		</p>
	<p>Feature extraction and description are fundamental components of visual perception systems used in applications such as visual odometry, Simultaneous Localization and Mapping (SLAM), and autonomous navigation. In resource-constrained platforms, such as Unmanned Aerial Vehicles (UAVs), achieving real-time hardware acceleration on Field-Programmable Gate Arrays (FPGAs) is challenging. This work demonstrates an FPGA-based implementation of an adaptive ORB (Oriented FAST and Rotated BRIEF) feature extraction pipeline designed for high-throughput and energy-efficient embedded vision. The proposed architecture is a completely new design for the main algorithmic blocks of ORB, including the FAST (Features from Accelerated Segment Test) feature detector, Gaussian image filtering, moment computation, and descriptor generation. Adaptive mechanisms are introduced to dynamically adjust thresholds and filtering behavior, improving robustness under varying illumination conditions. The design is developed using a High-Level Synthesis (HLS) approach, where all processing modules are implemented as reusable hardware IP cores and integrated at the system level. The architecture is deployed and evaluated on two FPGA platforms, PYNQ-Z2 and KRIA KR260, and its performance is compared against CPU and GPU implementations using a dedicated C++ testbench based on OpenCV. Experimental results demonstrate significant improvements in throughput and energy efficiency while maintaining stable and scalable performance, making the proposed solution suitable for real-time embedded vision applications on UAVs and similar platforms. 
Notably, the FPGA implementation increases DSP utilization from 11% to 29% compared to the previous designs implemented by other researchers, effectively offloading computational tasks from general purpose logic (LUTs and FFs), reducing LUT usage by 6% and FF usage by 13%, while maintaining overall design stability, scalability, and acceptable thermal margins at 2.387 W. This work establishes a robust foundation for integrating the optimized ORB pipeline into larger drone systems and opens the door for future system-level enhancements.</p>
	]]></content:encoded>

	<dc:title>Adaptive ORB Accelerator on FPGA: High Throughput, Power Consumption, and More Efficient Vision for UAVs</dc:title>
			<dc:creator>Hussam Rostum</dc:creator>
			<dc:creator>József Vásárhelyi</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010013</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/signals7010013</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/12">

	<title>Signals, Vol. 7, Pages 12: Multitrack Music Transcription Based on Joint Learning of Onset and Frame Streams</title>
	<link>https://www.mdpi.com/2624-6120/7/1/12</link>
	<description>Multitrack music transcription is the task of converting music recordings into symbolic music representations that are assigned to individual instruments. This task requires simultaneous transcription of note onset and offset events for individual instruments. In addition, the limited resources of many transcription datasets make multitrack music transcription challenging. Thus, even state-of-the-art transcription systems are inadequate for applications requiring high accuracy. In this paper, we propose a framework to jointly transcribe onsets and frames for multiple instruments by integrating a deep learning architecture based on U-Net with an architecture based on Perceiver, which is a variant of the Transformer architecture. The proposed framework effectively detects the pitches of different instruments by employing the multi-layer combined frequency and periodicity (ML-CFP) with multilayered frequency-domain and quefrency-domain features as the input data representation. Our experiments demonstrate that the proposed multitrack music transcription system outperforms existing systems on five transcription datasets, including low-resource datasets. Furthermore, we evaluate the proposed system in terms of instrument type and show that the system provides high-quality transcription results for the predominant instruments.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 12: Multitrack Music Transcription Based on Joint Learning of Onset and Frame Streams</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/12">doi: 10.3390/signals7010012</a></p>
	<p>Authors:
		Tomoki Matsunaga
		Hiroaki Saito
		</p>
	<p>Multitrack music transcription is the task of converting music recordings into symbolic music representations that are assigned to individual instruments. This task requires simultaneous transcription of note onset and offset events for individual instruments. In addition, the limited resources of many transcription datasets make multitrack music transcription challenging. Thus, even state-of-the-art transcription systems are inadequate for applications requiring high accuracy. In this paper, we propose a framework to jointly transcribe onsets and frames for multiple instruments by integrating a deep learning architecture based on U-Net with an architecture based on Perceiver, which is a variant of the Transformer architecture. The proposed framework effectively detects the pitches of different instruments by employing the multi-layer combined frequency and periodicity (ML-CFP) with multilayered frequency-domain and quefrency-domain features as the input data representation. Our experiments demonstrate that the proposed multitrack music transcription system outperforms existing systems on five transcription datasets, including low-resource datasets. Furthermore, we evaluate the proposed system in terms of instrument type and show that the system provides high-quality transcription results for the predominant instruments.</p>
	]]></content:encoded>

	<dc:title>Multitrack Music Transcription Based on Joint Learning of Onset and Frame Streams</dc:title>
			<dc:creator>Tomoki Matsunaga</dc:creator>
			<dc:creator>Hiroaki Saito</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010012</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/signals7010012</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/11">

	<title>Signals, Vol. 7, Pages 11: An Experimental Tabletop Platform for Bidirectional Molecular Communication Using Advection&amp;ndash;Diffusion Dynamics in Bio-Inspired Nanonetworks</title>
	<link>https://www.mdpi.com/2624-6120/7/1/11</link>
	<description>With rapid advances in nanotechnology and synthetic biology, biological nanonetworks are emerging for biomedical and environmental applications within the Internet of Bio-NanoThings. While they rely on molecular communication, experimental validation remains limited, especially for non-ideal effects such as molecular accumulation. In this work, we present a novel table-top experimental system that emulates the core functionalities of a biological nanonetwork and is straightforward to reproduce in standard laboratory environments, also making it suitable for educational demonstrations. To the best of our knowledge, this is the first experimental platform that incorporates two end nodes capable of acting interchangeably as transmitter and receiver, thereby enabling true bidirectional molecular communication. Information transfer is realized through controlled release, advection and diffusion of molecules, using molecular concentration coding analogous to concentration shift keying, while the receiver decodes messages by comparing measured concentrations against predefined thresholds. Based on the measurements reported herein, the drop-based algorithm substantially outperforms the threshold-based scheme. Specifically, it reduces first-message latency by more than 2.5&amp;times; across the tested volumes and reduces latest-message latency by up to 71%, providing approximately 3.7&amp;times; better message delivery. A key experimental outcome is the observation of channel saturation: beyond a certain operating period, residual molecules accumulate and effectively saturate the medium, inhibiting reliable further message exchange until sufficient clearance occurs. This saturation-induced &amp;ldquo;channel memory&amp;rdquo; emerges as a fundamental practical constraint on sustained communication and achievable data rates. 
Overall, the proposed platform provides a scalable, controllable, and experimentally accessible testbed for systematically studying signal degradation, saturation, clearance dynamics, and throughput limits, thereby bridging the gap between theoretical models and practical implementations in the Internet of Bio-NanoThings era.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 11: An Experimental Tabletop Platform for Bidirectional Molecular Communication Using Advection&amp;ndash;Diffusion Dynamics in Bio-Inspired Nanonetworks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/11">doi: 10.3390/signals7010011</a></p>
	<p>Authors:
		Nefeli Chatzisavvidou
		Stefanos Papasotiriou
		Ioanna Vrachni
		Konstantinos Kantelis
		Petros Nicopolitidis
		Georgios Papadimitriou
		</p>
	<p>With rapid advances in nanotechnology and synthetic biology, biological nanonetworks are emerging for biomedical and environmental applications within the Internet of Bio-NanoThings. While they rely on molecular communication, experimental validation remains limited, especially for non-ideal effects such as molecular accumulation. In this work, we present a novel table-top experimental system that emulates the core functionalities of a biological nanonetwork and is straightforward to reproduce in standard laboratory environments, also making it suitable for educational demonstrations. To the best of our knowledge, this is the first experimental platform that incorporates two end nodes capable of acting interchangeably as transmitter and receiver, thereby enabling true bidirectional molecular communication. Information transfer is realized through controlled release, advection and diffusion of molecules, using molecular concentration coding analogous to concentration shift keying, while the receiver decodes messages by comparing measured concentrations against predefined thresholds. Based on the measurements reported herein, the drop-based algorithm substantially outperforms the threshold-based scheme. Specifically, it reduces first-message latency by more than 2.5&amp;times; across the tested volumes and reduces latest-message latency by up to 71%, providing approximately 3.7&amp;times; better message delivery. A key experimental outcome is the observation of channel saturation: beyond a certain operating period, residual molecules accumulate and effectively saturate the medium, inhibiting reliable further message exchange until sufficient clearance occurs. This saturation-induced &amp;ldquo;channel memory&amp;rdquo; emerges as a fundamental practical constraint on sustained communication and achievable data rates. 
Overall, the proposed platform provides a scalable, controllable, and experimentally accessible testbed for systematically studying signal degradation, saturation, clearance dynamics, and throughput limits, thereby bridging the gap between theoretical models and practical implementations in the Internet of Bio-NanoThings era.</p>
	]]></content:encoded>

	<dc:title>An Experimental Tabletop Platform for Bidirectional Molecular Communication Using Advection&amp;ndash;Diffusion Dynamics in Bio-Inspired Nanonetworks</dc:title>
			<dc:creator>Nefeli Chatzisavvidou</dc:creator>
			<dc:creator>Stefanos Papasotiriou</dc:creator>
			<dc:creator>Ioanna Vrachni</dc:creator>
			<dc:creator>Konstantinos Kantelis</dc:creator>
			<dc:creator>Petros Nicopolitidis</dc:creator>
			<dc:creator>Georgios Papadimitriou</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010011</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/signals7010011</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/10">

	<title>Signals, Vol. 7, Pages 10: A Bispectral Slice Negentropy Analysis Method for the Detection and Diagnosis of Rolling Bearing Faults</title>
	<link>https://www.mdpi.com/2624-6120/7/1/10</link>
	<description>Bearing fault diagnosis is critical in rotating machinery, and collecting and analyzing vibration signals from faulty bearings is a widely employed method in fault diagnosis. To efficiently extract the information of periodic pulse from complex signals and accurately identify fault characteristic frequencies, this paper proposes a BSNA (Bispectral Slice Negentropy Analysis) method. This method leverages the nonlinear characteristics of bispectral analysis and the sensitivity of negentropy measures to transform one-dimensional signals into two-dimensional spectra. By utilizing the demodulation capability of the time-frequency modulation bispectrum, it highlights the relationship between resonance bands and modulation frequency, while maximizing the preservation of critical fault information and minimizing the impact of interference signals. The fault information contained in the slices is subsequently quantified using the CSNE (correlation spectral negentropy), which effectively captures the magnitude of periodic pulse energy. By calculating the CSNE of each modulation frequency slice and visualizing it, the energy distribution of periodic pulses within each slice can be effectively observed. The feasibility of this method in rolling bearing fault diagnosis has been validated through simulation analysis and experimental comparison. This approach enables the accurate identification of fault characteristic frequency and its harmonics, thereby significantly enhancing the accuracy and robustness of fault diagnosis, particularly in complex and noisy background environments.</description>
	<pubDate>2026-02-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 10: A Bispectral Slice Negentropy Analysis Method for the Detection and Diagnosis of Rolling Bearing Faults</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/10">doi: 10.3390/signals7010010</a></p>
	<p>Authors:
		Yifan Liu
		Yonggang Xu
		Yanping Zhu
		Xue Zou
		Huaming Zhang
		</p>
	<p>Bearing fault diagnosis is critical in rotating machinery, and collecting and analyzing vibration signals from faulty bearings is a widely employed method in fault diagnosis. To efficiently extract the information of periodic pulse from complex signals and accurately identify fault characteristic frequencies, this paper proposes a BSNA (Bispectral Slice Negentropy Analysis) method. This method leverages the nonlinear characteristics of bispectral analysis and the sensitivity of negentropy measures to transform one-dimensional signals into two-dimensional spectra. By utilizing the demodulation capability of the time-frequency modulation bispectrum, it highlights the relationship between resonance bands and modulation frequency, while maximizing the preservation of critical fault information and minimizing the impact of interference signals. The fault information contained in the slices is subsequently quantified using the CSNE (correlation spectral negentropy), which effectively captures the magnitude of periodic pulse energy. By calculating the CSNE of each modulation frequency slice and visualizing it, the energy distribution of periodic pulses within each slice can be effectively observed. The feasibility of this method in rolling bearing fault diagnosis has been validated through simulation analysis and experimental comparison. This approach enables the accurate identification of fault characteristic frequency and its harmonics, thereby significantly enhancing the accuracy and robustness of fault diagnosis, particularly in complex and noisy background environments.</p>
	]]></content:encoded>

	<dc:title>A Bispectral Slice Negentropy Analysis Method for the Detection and Diagnosis of Rolling Bearing Faults</dc:title>
			<dc:creator>Yifan Liu</dc:creator>
			<dc:creator>Yonggang Xu</dc:creator>
			<dc:creator>Yanping Zhu</dc:creator>
			<dc:creator>Xue Zou</dc:creator>
			<dc:creator>Huaming Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010010</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-02-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-02-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/signals7010010</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/9">

	<title>Signals, Vol. 7, Pages 9: Event-Triggered Robust Fusion Estimation for Multi-Sensor Systems Under Random Packet Drops</title>
	<link>https://www.mdpi.com/2624-6120/7/1/9</link>
	<description>This paper focuses on the design of robust fusion estimators for multi-sensor systems experiencing constrained communications, model uncertainties, and random packet dropouts. To mitigate the impact of modeling errors, a sensitivity-penalized robust state estimator is employed at each local estimator. At the local fusion estimators, a centralized robust fusion estimation algorithm is derived by improving the cost function of the sensitivity-penalized estimator. The implementation of an event-triggered strategy effectively alleviates the burden on the communication channels linking the sensors and the fusion center. Moreover, the fusion estimator is capable of handling packet drops caused by unreliable communication channels, and the pseudo cross-covariance matrix is accordingly formulated. Sufficient conditions are derived to ensure the uniform boundedness of the estimation error for the proposed robust fusion estimator. Finally, simulation experiments using a tractor-car system validate the performance and advantages of the presented algorithm.</description>
	<pubDate>2026-01-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 9: Event-Triggered Robust Fusion Estimation for Multi-Sensor Systems Under Random Packet Drops</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/9">doi: 10.3390/signals7010009</a></p>
	<p>Authors:
		Shaoxun Lu
		Huabo Liu
		</p>
	<p>This paper focuses on the design of robust fusion estimators for multi-sensor systems experiencing constrained communications, model uncertainties, and random packet dropouts. To mitigate the impact of modeling errors, a sensitivity-penalized robust state estimator is employed at each local estimator. At the local fusion estimators, a centralized robust fusion estimation algorithm is derived by improving the cost function of the sensitivity-penalized estimator. The implementation of an event-triggered strategy effectively alleviates the burden on the communication channels linking the sensors and the fusion center. Moreover, the fusion estimator is capable of handling packet drops caused by unreliable communication channels, and the pseudo cross-covariance matrix is accordingly formulated. Sufficient conditions are derived to ensure the uniform boundedness of the estimation error for the proposed robust fusion estimator. Finally, simulation experiments using a tractor-car system validate the performance and advantages of the presented algorithm.</p>
	]]></content:encoded>

	<dc:title>Event-Triggered Robust Fusion Estimation for Multi-Sensor Systems Under Random Packet Drops</dc:title>
			<dc:creator>Shaoxun Lu</dc:creator>
			<dc:creator>Huabo Liu</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010009</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-21</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-21</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/signals7010009</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/8">

	<title>Signals, Vol. 7, Pages 8: Firebug Swarm Optimization Algorithm: An Overview and Applications</title>
	<link>https://www.mdpi.com/2624-6120/7/1/8</link>
	<description>This survey delves into the Firebug Swarm Optimization (FSO) algorithm, an advanced global optimization algorithm that plays a pivotal role in modern swarm intelligence optimization techniques. It explores the core principles of the FSO algorithm and examines the various hybrid variants developed to address complex optimization challenges. This survey also traces the evolution of swarm optimization methods, shedding light onto the natural phenomena and biological processes that have inspired these algorithms. Furthermore, it highlights the diverse real-world applications of the FSO algorithm, showcasing its effectiveness in fields such as engineering, data science, and artificial intelligence. To provide a comprehensive comparison, the survey includes a case study that evaluates the FSO algorithm&amp;rsquo;s performance against other existing algorithms. Lastly, the survey identifies key open research questions and suggests potential future directions for advancing the FSO algorithm and other nature-inspired optimization techniques, aiming to overcome current limitations and unlock new possibilities.</description>
	<pubDate>2026-01-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 8: Firebug Swarm Optimization Algorithm: An Overview and Applications</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/8">doi: 10.3390/signals7010008</a></p>
	<p>Authors:
		Faroq Awin
		Yasser Alginahi
		Esam Abdel-Raheem
		</p>
	<p>This survey delves into the Firebug Swarm Optimization (FSO) algorithm, an advanced global optimization algorithm that plays a pivotal role in modern swarm intelligence optimization techniques. It explores the core principles of the FSO algorithm and examines the various hybrid variants developed to address complex optimization challenges. This survey also traces the evolution of swarm optimization methods, shedding light onto the natural phenomena and biological processes that have inspired these algorithms. Furthermore, it highlights the diverse real-world applications of the FSO algorithm, showcasing its effectiveness in fields such as engineering, data science, and artificial intelligence. To provide a comprehensive comparison, the survey includes a case study that evaluates the FSO algorithm&amp;rsquo;s performance against other existing algorithms. Lastly, the survey identifies key open research questions and suggests potential future directions for advancing the FSO algorithm and other nature-inspired optimization techniques, aiming to overcome current limitations and unlock new possibilities.</p>
	]]></content:encoded>

	<dc:title>Firebug Swarm Optimization Algorithm: An Overview and Applications</dc:title>
			<dc:creator>Faroq Awin</dc:creator>
			<dc:creator>Yasser Alginahi</dc:creator>
			<dc:creator>Esam Abdel-Raheem</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010008</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-13</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-13</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/signals7010008</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/7">

	<title>Signals, Vol. 7, Pages 7: Quantifying the Relationship Between Speech Quality Metrics and Biometric Speaker Recognition Performance Under Acoustic Degradation</title>
	<link>https://www.mdpi.com/2624-6120/7/1/7</link>
	<description>Self-supervised learning (SSL) models have achieved remarkable success in speaker verification tasks, yet their robustness to real-world audio degradation remains insufficiently characterized. This study presents a comprehensive analysis of how audio quality degradation affects three prominent SSL-based speaker verification systems (WavLM, Wav2Vec2, and HuBERT) across three diverse datasets: TIMIT, CHiME-6, and Common Voice. We systematically applied 21 degradation conditions spanning noise contamination (SNR levels from 0 to 20 dB), reverberation (RT60 from 0.3 to 1.0 s), and codec compression (various bit rates), then measured both objective audio quality metrics (PESQ, STOI, SNR, SegSNR, fwSNRseg, jitter, shimmer, HNR) and speaker verification performance metrics (EER, AUC-ROC, d-prime, minDCF). At the condition level, multiple regression with all eight quality metrics explained up to 80% of the variance in minDCF for HuBERT and 78% for WavLM, but only 35% for Wav2Vec2; EER predictability was lower (69%, 67%, and 28%, respectively). PESQ was the strongest single predictor for WavLM and HuBERT, while Shimmer showed the highest single-metric correlation for Wav2Vec2; fwSNRseg yielded the top single-metric R2 for WavLM, and PESQ for HuBERT and Wav2Vec2 (with much smaller gains for Wav2Vec2). WavLM and HuBERT exhibited more predictable quality-performance relationships compared to Wav2Vec2. These findings establish quantitative relationships between measurable audio quality and speaker verification accuracy at the condition level, though substantial within-condition variability limits utterance-level prediction accuracy.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 7: Quantifying the Relationship Between Speech Quality Metrics and Biometric Speaker Recognition Performance Under Acoustic Degradation</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/7">doi: 10.3390/signals7010007</a></p>
	<p>Authors:
		Ajan Ahmed
		Masudul H. Imtiaz
		</p>
	<p>Self-supervised learning (SSL) models have achieved remarkable success in speaker verification tasks, yet their robustness to real-world audio degradation remains insufficiently characterized. This study presents a comprehensive analysis of how audio quality degradation affects three prominent SSL-based speaker verification systems (WavLM, Wav2Vec2, and HuBERT) across three diverse datasets: TIMIT, CHiME-6, and Common Voice. We systematically applied 21 degradation conditions spanning noise contamination (SNR levels from 0 to 20 dB), reverberation (RT60 from 0.3 to 1.0 s), and codec compression (various bit rates), then measured both objective audio quality metrics (PESQ, STOI, SNR, SegSNR, fwSNRseg, jitter, shimmer, HNR) and speaker verification performance metrics (EER, AUC-ROC, d-prime, minDCF). At the condition level, multiple regression with all eight quality metrics explained up to 80% of the variance in minDCF for HuBERT and 78% for WavLM, but only 35% for Wav2Vec2; EER predictability was lower (69%, 67%, and 28%, respectively). PESQ was the strongest single predictor for WavLM and HuBERT, while Shimmer showed the highest single-metric correlation for Wav2Vec2; fwSNRseg yielded the top single-metric R2 for WavLM, and PESQ for HuBERT and Wav2Vec2 (with much smaller gains for Wav2Vec2). WavLM and HuBERT exhibited more predictable quality-performance relationships compared to Wav2Vec2. These findings establish quantitative relationships between measurable audio quality and speaker verification accuracy at the condition level, though substantial within-condition variability limits utterance-level prediction accuracy.</p>
	]]></content:encoded>

	<dc:title>Quantifying the Relationship Between Speech Quality Metrics and Biometric Speaker Recognition Performance Under Acoustic Degradation</dc:title>
			<dc:creator>Ajan Ahmed</dc:creator>
			<dc:creator>Masudul H. Imtiaz</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010007</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/signals7010007</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/6">

	<title>Signals, Vol. 7, Pages 6: Inverse Synthetic Aperture Radar Imaging of Space Objects Using Probing Signal with a Zero Autocorrelation Zone</title>
	<link>https://www.mdpi.com/2624-6120/7/1/6</link>
	<description>To obtain radar images of a group of small space objects or to resolve individual elements of complex space objects in near-Earth orbit, a radar system must have high spatial resolution. High range resolution is achieved by using complex probing signals with a wide spectrum bandwidth. Achieving high angular resolution for small or complex space objects is based on the inverse synthetic aperture antenna effect. Among the various classes of complex signals, only two have found practical application in Inverse Synthetic Aperture Radar (ISAR) systems so far: the Linear Frequency-Modulated signal (chirp) and the Stepped-Frequency signal. Over the coherent integration interval of the echo signals, which corresponds to the ISAR aperture synthesis time, the combined correlation characteristics of the signal ensemble are analyzed. A high level of integral correlation noise in the ensemble of probing signals degrades the quality of the radar image. Therefore, a probing signal with a Zero Autocorrelation Zone (ZACZ) is highly relevant for ISAR applications. In this work, through simulation, radar images of a complex space object were obtained using both chirp and ZACZ probing signals. A comparative analysis of the correlation characteristics of the echo signals and the resulting radar images of the complex space object was performed.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 6: Inverse Synthetic Aperture Radar Imaging of Space Objects Using Probing Signal with a Zero Autocorrelation Zone</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/6">doi: 10.3390/signals7010006</a></p>
	<p>Authors:
		Roman N. Ipanov
		Aleksey A. Komarov
		</p>
	<p>To obtain radar images of a group of small space objects or to resolve individual elements of complex space objects in near-Earth orbit, a radar system must have high spatial resolution. High range resolution is achieved by using complex probing signals with a wide spectrum bandwidth. Achieving high angular resolution for small or complex space objects is based on the inverse synthetic aperture antenna effect. Among the various classes of complex signals, only two have found practical application in Inverse Synthetic Aperture Radar (ISAR) systems so far: the Linear Frequency-Modulated signal (chirp) and the Stepped-Frequency signal. Over the coherent integration interval of the echo signals, which corresponds to the ISAR aperture synthesis time, the combined correlation characteristics of the signal ensemble are analyzed. A high level of integral correlation noise in the ensemble of probing signals degrades the quality of the radar image. Therefore, a probing signal with a Zero Autocorrelation Zone (ZACZ) is highly relevant for ISAR applications. In this work, through simulation, radar images of a complex space object were obtained using both chirp and ZACZ probing signals. A comparative analysis of the correlation characteristics of the echo signals and the resulting radar images of the complex space object was performed.</p>
	]]></content:encoded>

	<dc:title>Inverse Synthetic Aperture Radar Imaging of Space Objects Using Probing Signal with a Zero Autocorrelation Zone</dc:title>
			<dc:creator>Roman N. Ipanov</dc:creator>
			<dc:creator>Aleksey A. Komarov</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010006</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/signals7010006</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/5">

	<title>Signals, Vol. 7, Pages 5: EMG-Based Muscle Synergy Analysis: Leg Dominance Effects During One-Leg Stance on Stable and Unstable Surfaces</title>
	<link>https://www.mdpi.com/2624-6120/7/1/5</link>
	<description>Leg dominance has been linked to an increased risk of lower-limb injuries in sports. This study examined bilateral asymmetry in muscle synergy patterns during one-leg stance on stable and multiaxial unstable surfaces. Twenty-five active young adults (25.6 ± 3.9 years) performed unipedal stance tasks on their dominant and non-dominant legs while surface electromyography (EMG) was recorded from seven lower-limb muscles per leg. Muscle synergies were extracted using non-negative matrix factorization (NMF), and structural similarity was assessed via cosine similarity with the Hungarian matching algorithm. Four consistent synergies were identified under both surface conditions, accounting for 88% of the total variance. On the stable surface, significant asymmetry in muscle weightings was observed in the rectus femoris (p = 0.030) for Synergy 1 and in the rectus femoris (p = 0.042), tibialis anterior (p = 0.024), peroneus longus (p = 0.023), and soleus (p = 0.006) for Synergy 2. On the unstable surface, asymmetry was evident in the biceps femoris (p = 0.048) for Synergy 2 and the rectus femoris (p = 0.045) for Synergy 3. Overall, dominance-related asymmetry was more pronounced under stable conditions and became more subtle as postural demand increased, revealing bilateral asymmetry in neuromuscular coordination during unipedal stance.</description>
	<pubDate>2026-01-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 5: EMG-Based Muscle Synergy Analysis: Leg Dominance Effects During One-Leg Stance on Stable and Unstable Surfaces</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/5">doi: 10.3390/signals7010005</a></p>
	<p>Authors:
		Arunee Promsri
		</p>
	<p>Leg dominance has been linked to an increased risk of lower-limb injuries in sports. This study examined bilateral asymmetry in muscle synergy patterns during one-leg stance on stable and multiaxial unstable surfaces. Twenty-five active young adults (25.6 ± 3.9 years) performed unipedal stance tasks on their dominant and non-dominant legs while surface electromyography (EMG) was recorded from seven lower-limb muscles per leg. Muscle synergies were extracted using non-negative matrix factorization (NMF), and structural similarity was assessed via cosine similarity with the Hungarian matching algorithm. Four consistent synergies were identified under both surface conditions, accounting for 88% of the total variance. On the stable surface, significant asymmetry in muscle weightings was observed in the rectus femoris (p = 0.030) for Synergy 1 and in the rectus femoris (p = 0.042), tibialis anterior (p = 0.024), peroneus longus (p = 0.023), and soleus (p = 0.006) for Synergy 2. On the unstable surface, asymmetry was evident in the biceps femoris (p = 0.048) for Synergy 2 and the rectus femoris (p = 0.045) for Synergy 3. Overall, dominance-related asymmetry was more pronounced under stable conditions and became more subtle as postural demand increased, revealing bilateral asymmetry in neuromuscular coordination during unipedal stance.</p>
	]]></content:encoded>

	<dc:title>EMG-Based Muscle Synergy Analysis: Leg Dominance Effects During One-Leg Stance on Stable and Unstable Surfaces</dc:title>
			<dc:creator>Arunee Promsri</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010005</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-09</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-09</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/signals7010005</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/4">

	<title>Signals, Vol. 7, Pages 4: Multimodal Hybrid CNN-Transformer with Attention Mechanism for Sleep Stages and Disorders Classification Using Bio-Signal Images</title>
	<link>https://www.mdpi.com/2624-6120/7/1/4</link>
	<description>Background and Objective: The accurate detection of sleep stages and disorders in older adults is essential for the effective diagnosis and treatment of sleep disorders affecting millions worldwide. Although Polysomnography (PSG) remains the primary method for monitoring sleep in medical settings, it is costly and time-consuming. Recent automated models have not fully explored and effectively fused the sleep features that are essential to identify sleep stages and disorders. This study proposes a novel automated model for detecting sleep stages and disorders in older adults by analyzing PSG recordings. PSG data include multiple channels, and the use of our proposed advanced methods reveals the potential correlations and complementary features across EEG, EOG, and EMG signals. Methods: In this study, we employed three novel advanced architectures, (1) CNNs, (2) CNNs with Bi-LSTM, and (3) CNNs with a transformer encoder, for the automatic classification of sleep stages and disorders using multichannel PSG data. The CNN extracts local features from RGB spectrogram images of EEG, EOG, and EMG signals individually, followed by an appropriate column-wise feature fusion block. The Bi-LSTM and transformer encoder are then used to learn and capture intra-epoch feature transition rules and dependencies. A residual connection is also applied to preserve the characteristics of the original joint feature maps and prevent gradient vanishing. Results: The experimental results in the CAP sleep database demonstrated that our proposed CNN with transformer encoder method outperformed standalone CNN, CNN with Bi-LSTM, and other advanced state-of-the-art methods in sleep stages and disorders classification. It achieves an accuracy of 95.2%, Cohen’s kappa of 93.6%, MF1 of 91.3%, and MGm of 95% for sleep staging, and an accuracy of 99.3%, Cohen’s kappa of 99.1%, MF1 of 99.2%, and MGm of 99.6% for disorder detection. 
Our model also achieves superior performance to other state-of-the-art approaches in the classification of N1, a stage known for its classification difficulty. Conclusions: To the best of our knowledge, we are the first group going beyond the standard to investigate and innovate a model architecture which is accurate and robust for classifying sleep stages and disorders in the elderly for both patient and non-patient subjects. Given its high performance, our method has the potential to be integrated and deployed into clinical routine care settings.</description>
	<pubDate>2026-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 4: Multimodal Hybrid CNN-Transformer with Attention Mechanism for Sleep Stages and Disorders Classification Using Bio-Signal Images</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/4">doi: 10.3390/signals7010004</a></p>
	<p>Authors:
		Innocent Tujyinama
		Bessam Abdulrazak
		Rachid Hedjam
		</p>
	<p>Background and Objective: The accurate detection of sleep stages and disorders in older adults is essential for the effective diagnosis and treatment of sleep disorders affecting millions worldwide. Although Polysomnography (PSG) remains the primary method for monitoring sleep in medical settings, it is costly and time-consuming. Recent automated models have not fully explored and effectively fused the sleep features that are essential to identify sleep stages and disorders. This study proposes a novel automated model for detecting sleep stages and disorders in older adults by analyzing PSG recordings. PSG data include multiple channels, and the use of our proposed advanced methods reveals the potential correlations and complementary features across EEG, EOG, and EMG signals. Methods: In this study, we employed three novel advanced architectures, (1) CNNs, (2) CNNs with Bi-LSTM, and (3) CNNs with a transformer encoder, for the automatic classification of sleep stages and disorders using multichannel PSG data. The CNN extracts local features from RGB spectrogram images of EEG, EOG, and EMG signals individually, followed by an appropriate column-wise feature fusion block. The Bi-LSTM and transformer encoder are then used to learn and capture intra-epoch feature transition rules and dependencies. A residual connection is also applied to preserve the characteristics of the original joint feature maps and prevent gradient vanishing. Results: The experimental results in the CAP sleep database demonstrated that our proposed CNN with transformer encoder method outperformed standalone CNN, CNN with Bi-LSTM, and other advanced state-of-the-art methods in sleep stages and disorders classification. It achieves an accuracy of 95.2%, Cohen’s kappa of 93.6%, MF1 of 91.3%, and MGm of 95% for sleep staging, and an accuracy of 99.3%, Cohen’s kappa of 99.1%, MF1 of 99.2%, and MGm of 99.6% for disorder detection. 
Our model also achieves superior performance to other state-of-the-art approaches in the classification of N1, a stage known for its classification difficulty. Conclusions: To the best of our knowledge, we are the first group going beyond the standard to investigate and innovate a model architecture which is accurate and robust for classifying sleep stages and disorders in the elderly for both patient and non-patient subjects. Given its high performance, our method has the potential to be integrated and deployed into clinical routine care settings.</p>
	]]></content:encoded>

	<dc:title>Multimodal Hybrid CNN-Transformer with Attention Mechanism for Sleep Stages and Disorders Classification Using Bio-Signal Images</dc:title>
			<dc:creator>Innocent Tujyinama</dc:creator>
			<dc:creator>Bessam Abdulrazak</dc:creator>
			<dc:creator>Rachid Hedjam</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010004</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/signals7010004</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/3">

	<title>Signals, Vol. 7, Pages 3: A Novel Deterministic Algorithm for Atrial Fibrillation Detection</title>
	<link>https://www.mdpi.com/2624-6120/7/1/3</link>
	<description>The absence of a recognizable P wave in an electrocardiogram (ECG) is a critical indicator for the diagnosis of atrial fibrillation (AF). An algorithm capable of distinguishing between physiological and pathological states in a short period of time could serve as a valuable tool for timely and effective diagnosis, even in a home setting. To achieve this goal, a deterministic algorithm is proposed. The Fantasia Database and the AF Termination Challenge Database were used for training the model. Subsequently, for the test session, a one-minute recording was extracted from the Autonomic Aging Dataset and the Long-Term AF Database. After band-pass filtering, characteristic points such as R-peaks and P waves were extracted. The R-peak detection algorithm was compared with the gold standard Pan-Tompkins, obtaining a p-value &gt; 0.05 on the Fantasia Database, which means that there is no statistical difference between them. Subsequently derived features such as duration, amplitude, subtended area, and P wave slope have been used to discriminate healthy subjects from AF patients. The P-wave slope emerged as the most effective feature, achieving a classification accuracy of 100% and 96% for the training and test sets, respectively. This algorithm thus represents a significant advancement as it achieves a performance comparable to other deterministic methods based on P wave analysis using only one-minute recordings, thereby enabling accurate diagnosis in a shorter time frame.</description>
	<pubDate>2026-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 3: A Novel Deterministic Algorithm for Atrial Fibrillation Detection</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/3">doi: 10.3390/signals7010003</a></p>
	<p>Authors:
		Alessandro Filisetti
		Pietro Bia
		Germana Luciani
		Margherita Losardo
		Riccardo Ardoino
		Antonio Manna
		</p>
	<p>The absence of a recognizable P wave in an electrocardiogram (ECG) is a critical indicator for the diagnosis of atrial fibrillation (AF). An algorithm capable of distinguishing between physiological and pathological states in a short period of time could serve as a valuable tool for timely and effective diagnosis, even in a home setting. To achieve this goal, a deterministic algorithm is proposed. The Fantasia Database and the AF Termination Challenge Database were used for training the model. Subsequently, for the test session, a one-minute recording was extracted from the Autonomic Aging Dataset and the Long-Term AF Database. After band-pass filtering, characteristic points such as R-peaks and P waves were extracted. The R-peak detection algorithm was compared with the gold standard Pan-Tompkins, obtaining a p-value > 0.05 on the Fantasia Database, which means that there is no statistical difference between them. Subsequently derived features such as duration, amplitude, subtended area, and P wave slope have been used to discriminate healthy subjects from AF patients. The P-wave slope emerged as the most effective feature, achieving a classification accuracy of 100% and 96% for the training and test sets, respectively. This algorithm thus represents a significant advancement as it achieves a performance comparable to other deterministic methods based on P wave analysis using only one-minute recordings, thereby enabling accurate diagnosis in a shorter time frame.</p>
	]]></content:encoded>

	<dc:title>A Novel Deterministic Algorithm for Atrial Fibrillation Detection</dc:title>
			<dc:creator>Alessandro Filisetti</dc:creator>
			<dc:creator>Pietro Bia</dc:creator>
			<dc:creator>Germana Luciani</dc:creator>
			<dc:creator>Margherita Losardo</dc:creator>
			<dc:creator>Riccardo Ardoino</dc:creator>
			<dc:creator>Antonio Manna</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010003</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/signals7010003</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/2">

	<title>Signals, Vol. 7, Pages 2: Combined Infinity Laplacian and Non-Local Means Models Applied to Depth Map Restoration</title>
	<link>https://www.mdpi.com/2624-6120/7/1/2</link>
	<description>Scene depth information is a key component of any robotic mobile application. Range sensors, such as LiDAR, sonar, or radar, capture depth data of a scene. However, the data captured by these sensors frequently presents missing regions or information with a low confidence level. These missing regions in the depth data could be large areas without information, making it difficult to make decisions, for instance, for an autonomous vehicle. Recovering depth data has become a primary activity for computer vision applications. This work proposes and evaluates an interpolation model to infer dense depth maps from a Lab color space reference picture and an incomplete-depth image embedded in a completion pipeline. The complete proposal pipeline comprises convolutional layers and a convex combination of the infinity Laplacian and non-local means model. The proposed model infers dense depth maps by considering depth data and utilizing clues from a color picture of the scene, along with a metric for computing differences between two pixels. The work contributes (i) the convex combination of the two models to interpolate the data, and (ii) the proposal of a class of function suitable for balancing between different models. The obtained results show that the model outperforms similar models in the KITTI dataset and outperforms our previous implementation in the NYU_v2 dataset, dropping the MSE by 34.86%, 3.35%, and 34.42% for 4×, 8×, 16× upsampling tasks, respectively.</description>
	<pubDate>2026-01-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 2: Combined Infinity Laplacian and Non-Local Means Models Applied to Depth Map Restoration</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/2">doi: 10.3390/signals7010002</a></p>
	<p>Authors:
		Vanel Lazcano
		Mabel Vega-Rojas
		Felipe Calderero
		</p>
	<p>Scene depth information is a key component of any robotic mobile application. Range sensors, such as LiDAR, sonar, or radar, capture depth data of a scene. However, the data captured by these sensors frequently presents missing regions or information with a low confidence level. These missing regions in the depth data could be large areas without information, making it difficult to make decisions, for instance, for an autonomous vehicle. Recovering depth data has become a primary activity for computer vision applications. This work proposes and evaluates an interpolation model to infer dense depth maps from a Lab color space reference picture and an incomplete-depth image embedded in a completion pipeline. The complete proposal pipeline comprises convolutional layers and a convex combination of the infinity Laplacian and non-local means model. The proposed model infers dense depth maps by considering depth data and utilizing clues from a color picture of the scene, along with a metric for computing differences between two pixels. The work contributes (i) the convex combination of the two models to interpolate the data, and (ii) the proposal of a class of function suitable for balancing between different models. The obtained results show that the model outperforms similar models in the KITTI dataset and outperforms our previous implementation in the NYU_v2 dataset, dropping the MSE by 34.86%, 3.35%, and 34.42% for 4×, 8×, 16× upsampling tasks, respectively.</p>
	]]></content:encoded>

	<dc:title>Combined Infinity Laplacian and Non-Local Means Models Applied to Depth Map Restoration</dc:title>
			<dc:creator>Vanel Lazcano</dc:creator>
			<dc:creator>Mabel Vega-Rojas</dc:creator>
			<dc:creator>Felipe Calderero</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010002</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2026-01-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2026-01-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/signals7010002</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/7/1/1">

	<title>Signals, Vol. 7, Pages 1: Evaluation of Jamming Attacks on NR-V2X Systems: Simulation and Experimental Perspectives</title>
	<link>https://www.mdpi.com/2624-6120/7/1/1</link>
	<description>Autonomous vehicles (AVs) are transforming transportation by improving safety, efficiency, and intelligence through integrated sensing, computing, and communication technologies. However, their growing reliance on Vehicle-to-Everything (V2X) communication exposes them to cybersecurity vulnerabilities, particularly at the physical layer. Among these, jamming attacks represent a critical threat by disrupting wireless channels and compromising message delivery, severely impacting vehicle coordination and safety. This work investigates the robustness of New Radio (NR)-V2X-enabled vehicular systems under jamming conditions through a dual-methodology approach. First, two Cooperative Intelligent Transport System (C-ITS) scenarios standardized by 3GPP—Do Not Pass Warning (DNPW) and Intersection Movement Assist (IMA)—are implemented in the OMNeT++ simulation environment using Simu5G, Veins, and SUMO. The simulations incorporate four types of jamming strategies and evaluate their impact on key metrics such as packet loss, signal quality, inter-vehicle spacing, and collision risk. Second, a complementary laboratory experiment is conducted using AnaPico vector signal generators (a Keysight Technologies brand) and an Anritsu multi-channel spectrum receiver, replicating controlled wireless conditions to validate the degradation effects observed in the simulation. The findings reveal that jamming severely undermines communication reliability in NR-V2X systems, both in simulation and in practice. These findings highlight the urgent need for resilient NR-V2X protocols and countermeasures to ensure the integrity of cooperative autonomous systems in adversarial environments.</description>
	<pubDate>2025-12-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 7, Pages 1: Evaluation of Jamming Attacks on NR-V2X Systems: Simulation and Experimental Perspectives</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/7/1/1">doi: 10.3390/signals7010001</a></p>
	<p>Authors:
		Antonio Santos da Silva
		Kevin Herman Muraro Gularte
		Giovanni Almeida Santos
		Davi Salomão Soares Corrêa
		Luís Felipe Oliveira de Melo
		João Paulo Javidi da Costa
		José Alfredo Ruiz Vargas
		Daniel Alves da Silva
		Tai Fei
		</p>
	<p>Autonomous vehicles (AVs) are transforming transportation by improving safety, efficiency, and intelligence through integrated sensing, computing, and communication technologies. However, their growing reliance on Vehicle-to-Everything (V2X) communication exposes them to cybersecurity vulnerabilities, particularly at the physical layer. Among these, jamming attacks represent a critical threat by disrupting wireless channels and compromising message delivery, severely impacting vehicle coordination and safety. This work investigates the robustness of New Radio (NR)-V2X-enabled vehicular systems under jamming conditions through a dual-methodology approach. First, two Cooperative Intelligent Transport System (C-ITS) scenarios standardized by 3GPP—Do Not Pass Warning (DNPW) and Intersection Movement Assist (IMA)—are implemented in the OMNeT++ simulation environment using Simu5G, Veins, and SUMO. The simulations incorporate four types of jamming strategies and evaluate their impact on key metrics such as packet loss, signal quality, inter-vehicle spacing, and collision risk. Second, a complementary laboratory experiment is conducted using AnaPico vector signal generators (a Keysight Technologies brand) and an Anritsu multi-channel spectrum receiver, replicating controlled wireless conditions to validate the degradation effects observed in the simulation. The findings reveal that jamming severely undermines communication reliability in NR-V2X systems, both in simulation and in practice. These findings highlight the urgent need for resilient NR-V2X protocols and countermeasures to ensure the integrity of cooperative autonomous systems in adversarial environments.</p>
	]]></content:encoded>

	<dc:title>Evaluation of Jamming Attacks on NR-V2X Systems: Simulation and Experimental Perspectives</dc:title>
			<dc:creator>Antonio Santos da Silva</dc:creator>
			<dc:creator>Kevin Herman Muraro Gularte</dc:creator>
			<dc:creator>Giovanni Almeida Santos</dc:creator>
			<dc:creator>Davi Salomão Soares Corrêa</dc:creator>
			<dc:creator>Luís Felipe Oliveira de Melo</dc:creator>
			<dc:creator>João Paulo Javidi da Costa</dc:creator>
			<dc:creator>José Alfredo Ruiz Vargas</dc:creator>
			<dc:creator>Daniel Alves da Silva</dc:creator>
			<dc:creator>Tai Fei</dc:creator>
		<dc:identifier>doi: 10.3390/signals7010001</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-19</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-19</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/signals7010001</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/7/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/74">

	<title>Signals, Vol. 6, Pages 74: An Improved Variable Step-Size Normalized Subband Adaptive Filtering Algorithm for Signal Clipping Distortion</title>
	<link>https://www.mdpi.com/2624-6120/6/4/74</link>
	<description>The safe and stable operation of power systems and other dynamic systems relies on accurate perception of their dynamic processes. Voltage, current, and other measurement signals carry critical information about the system’s state. However, under conditions such as equipment damage, aging, and non-ideal operational conditions of devices under test, over-range phenomena may occur, leading to biased estimation issues in adaptive filters. To address this problem, this paper proposes a variable-parameter subband adaptive filtering algorithm with signal clipping distortion awareness. The algorithm first uses the Expectation-Maximization (EM) process to achieve high-fidelity restoration of damaged signals. Then, by integrating an intelligent steady-state detector and a dual-mode control mechanism, the adaptive filter can adjust key parameters such as step-size, forgetting factor, and regularization parameter based on state perception results. Finally, theoretical analysis proves the unbiased nature of the proposed method. Validation using real-world data from a high-penetration renewable energy power system shows that the algorithm achieves fast tracking during transient events and provides high-precision estimation during steady-state operation, offering an effective solution for real-time, high-accuracy processing of dynamic measurement data in power systems.</description>
	<pubDate>2025-12-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 74: An Improved Variable Step-Size Normalized Subband Adaptive Filtering Algorithm for Signal Clipping Distortion</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/74">doi: 10.3390/signals6040074</a></p>
	<p>Authors:
		Jiapeng Duan
		Bo Zhang
		</p>
	<p>The safe and stable operation of power systems and other dynamic systems relies on accurate perception of their dynamic processes. Voltage, current, and other measurement signals carry critical information about the system’s state. However, under conditions such as equipment damage, aging, and non-ideal operational conditions of devices under test, over-range phenomena may occur, leading to biased estimation issues in adaptive filters. To address this problem, this paper proposes a variable-parameter subband adaptive filtering algorithm with signal clipping distortion awareness. The algorithm first uses the Expectation-Maximization (EM) process to achieve high-fidelity restoration of damaged signals. Then, by integrating an intelligent steady-state detector and a dual-mode control mechanism, the adaptive filter can adjust key parameters such as step-size, forgetting factor, and regularization parameter based on state perception results. Finally, theoretical analysis proves the unbiased nature of the proposed method. Validation using real-world data from a high-penetration renewable energy power system shows that the algorithm achieves fast tracking during transient events and provides high-precision estimation during steady-state operation, offering an effective solution for real-time, high-accuracy processing of dynamic measurement data in power systems.</p>
	]]></content:encoded>

	<dc:title>An Improved Variable Step-Size Normalized Subband Adaptive Filtering Algorithm for Signal Clipping Distortion</dc:title>
			<dc:creator>Jiapeng Duan</dc:creator>
			<dc:creator>Bo Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040074</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-12</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-12</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>74</prism:startingPage>
		<prism:doi>10.3390/signals6040074</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/74</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/73">

	<title>Signals, Vol. 6, Pages 73: Vibro-Acoustic Characterization of Additively Manufactured Loudspeaker Enclosures: A Parametric Study of Material and Infill Influence</title>
	<link>https://www.mdpi.com/2624-6120/6/4/73</link>
	<description>This paper presents a comparative analysis of the influence of Fused Deposition Modeling (FDM) parameters&amp;mdash;specifically material type, infill geometry, and density&amp;mdash;on the vibro-acoustic characteristics of loudspeaker enclosures. The enclosures were designed as exponential horns to intensify resonance phenomena for precise evaluation. Twelve unique configurations were fabricated using three materials with distinct damping properties (PLA, ABS, wood-composite) and three internal geometries (linear, honeycomb, Gyroid). Key vibro-acoustic properties were assessed via digital signal processing of recorded audio signals, including relative frequency response and time-frequency (spectrogram) analysis, and correlated with a predictive Finite Element Analysis (FEA) model of mechanical vibrations. The study unequivocally demonstrates that a material with a high internal damping coefficient is a critical factor. The wood-composite enabled a reduction in the main resonance amplitude by approximately 4 dB compared to PLA with the same geometry, corresponding to a predicted 86% reduction in mechanical vibration. Furthermore, the results show that a synergy between a high-damping material and an advanced, energy-dissipating infill (Gyroid) is crucial for achieving high acoustic fidelity. The wood-composite with 10% Gyroid infill was identified as the optimal design, offering the most effective resonance damping and the most neutral tonal characteristic. This work provides a valuable contribution to the field by establishing a clear link between FDM parameters and acoustic outcomes, delivering practical guidelines for performance optimization in personalized audio systems.</description>
	<pubDate>2025-12-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 73: Vibro-Acoustic Characterization of Additively Manufactured Loudspeaker Enclosures: A Parametric Study of Material and Infill Influence</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/73">doi: 10.3390/signals6040073</a></p>
	<p>Authors:
		Jakub Konopiński
		Piotr Sosiński
		Mikołaj Wanat
		Piotr Góral
		</p>
	<p>This paper presents a comparative analysis of the influence of Fused Deposition Modeling (FDM) parameters&mdash;specifically material type, infill geometry, and density&mdash;on the vibro-acoustic characteristics of loudspeaker enclosures. The enclosures were designed as exponential horns to intensify resonance phenomena for precise evaluation. Twelve unique configurations were fabricated using three materials with distinct damping properties (PLA, ABS, wood-composite) and three internal geometries (linear, honeycomb, Gyroid). Key vibro-acoustic properties were assessed via digital signal processing of recorded audio signals, including relative frequency response and time-frequency (spectrogram) analysis, and correlated with a predictive Finite Element Analysis (FEA) model of mechanical vibrations. The study unequivocally demonstrates that a material with a high internal damping coefficient is a critical factor. The wood-composite enabled a reduction in the main resonance amplitude by approximately 4 dB compared to PLA with the same geometry, corresponding to a predicted 86% reduction in mechanical vibration. Furthermore, the results show that a synergy between a high-damping material and an advanced, energy-dissipating infill (Gyroid) is crucial for achieving high acoustic fidelity. The wood-composite with 10% Gyroid infill was identified as the optimal design, offering the most effective resonance damping and the most neutral tonal characteristic. This work provides a valuable contribution to the field by establishing a clear link between FDM parameters and acoustic outcomes, delivering practical guidelines for performance optimization in personalized audio systems.</p>
	]]></content:encoded>

	<dc:title>Vibro-Acoustic Characterization of Additively Manufactured Loudspeaker Enclosures: A Parametric Study of Material and Infill Influence</dc:title>
			<dc:creator>Jakub Konopiński</dc:creator>
			<dc:creator>Piotr Sosiński</dc:creator>
			<dc:creator>Mikołaj Wanat</dc:creator>
			<dc:creator>Piotr Góral</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040073</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-12</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-12</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>73</prism:startingPage>
		<prism:doi>10.3390/signals6040073</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/73</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/72">

	<title>Signals, Vol. 6, Pages 72: CHROM-Y: Illumination-Adaptive Robust Remote Photoplethysmography Through 2D Chrominance&amp;ndash;Luminance Fusion and Convolutional Neural Networks</title>
	<link>https://www.mdpi.com/2624-6120/6/4/72</link>
	<description>Remote photoplethysmography (rPPG) enables non-contact heart rate estimation but remains highly sensitive to illumination variation and dataset-dependent factors. This study proposes CHROM-Y, a robust 2D feature representation that combines chrominance (&amp;Omega;, &amp;Phi;) with luminance (Y) to improve physiological signal extraction under varying lighting conditions. The proposed features were evaluated using U-Net, ResNet-18, and VGG16 for heart rate estimation and waveform reconstruction on the UBFC-rPPG and BhRPPG datasets. On UBFC-rPPG, U-Net with CHROM-Y achieved the best performance with a Peak MAE of 3.62 bpm and RMSE of 6.67 bpm, while ablation experiments confirmed the importance of the Y-channel, showing degradation of up to 41.14% in MAE when removed. Although waveform reconstruction demonstrated low Pearson correlation, dominant frequency preservation enabled reliable frequency-based HR estimation. Cross-dataset evaluation revealed reduced generalization (MAE up to 13.33 bpm and RMSE up to 22.80 bpm), highlighting sensitivity to domain shifts. However, fine-tuning U-Net on BhRPPG produced consistent improvements across low, medium, and high illumination levels, with performance gains of 11.18&amp;ndash;29.47% in MAE and 12.48&amp;ndash;27.94% in RMSE, indicating improved adaptability to illumination variations.</description>
	<pubDate>2025-12-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 72: CHROM-Y: Illumination-Adaptive Robust Remote Photoplethysmography Through 2D Chrominance&ndash;Luminance Fusion and Convolutional Neural Networks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/72">doi: 10.3390/signals6040072</a></p>
	<p>Authors:
		Mohammed Javidh
		Ruchi Shah
		Mohan Uma
		Sethuramalingam Prabhu
		Rajendran Beaulah Jeyavathana
		</p>
	<p>Remote photoplethysmography (rPPG) enables non-contact heart rate estimation but remains highly sensitive to illumination variation and dataset-dependent factors. This study proposes CHROM-Y, a robust 2D feature representation that combines chrominance (&Omega;, &Phi;) with luminance (Y) to improve physiological signal extraction under varying lighting conditions. The proposed features were evaluated using U-Net, ResNet-18, and VGG16 for heart rate estimation and waveform reconstruction on the UBFC-rPPG and BhRPPG datasets. On UBFC-rPPG, U-Net with CHROM-Y achieved the best performance with a Peak MAE of 3.62 bpm and RMSE of 6.67 bpm, while ablation experiments confirmed the importance of the Y-channel, showing degradation of up to 41.14% in MAE when removed. Although waveform reconstruction demonstrated low Pearson correlation, dominant frequency preservation enabled reliable frequency-based HR estimation. Cross-dataset evaluation revealed reduced generalization (MAE up to 13.33 bpm and RMSE up to 22.80 bpm), highlighting sensitivity to domain shifts. However, fine-tuning U-Net on BhRPPG produced consistent improvements across low, medium, and high illumination levels, with performance gains of 11.18&ndash;29.47% in MAE and 12.48&ndash;27.94% in RMSE, indicating improved adaptability to illumination variations.</p>
	]]></content:encoded>

	<dc:title>CHROM-Y: Illumination-Adaptive Robust Remote Photoplethysmography Through 2D Chrominance&amp;ndash;Luminance Fusion and Convolutional Neural Networks</dc:title>
			<dc:creator>Mohammed Javidh</dc:creator>
			<dc:creator>Ruchi Shah</dc:creator>
			<dc:creator>Mohan Uma</dc:creator>
			<dc:creator>Sethuramalingam Prabhu</dc:creator>
			<dc:creator>Rajendran Beaulah Jeyavathana</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040072</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-09</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-09</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>72</prism:startingPage>
		<prism:doi>10.3390/signals6040072</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/72</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/71">

	<title>Signals, Vol. 6, Pages 71: A Dynamic Empirical Bayes Signal Model for Attribute Defect Detection</title>
	<link>https://www.mdpi.com/2624-6120/6/4/71</link>
	<description>This study evaluates Empirical Bayes (EB) c-charts for monitoring count-type data under precautionary (PLF) and logarithmic (LLF) loss functions. By assuming an exponential prior for the Poisson mean, the EB framework enables the construction of predictive densities for future observations. Simulation studies and a real-world dataset on missing rivets in large aircraft were used to compare the methods&amp;rsquo; ability to detect out-of-control conditions. The results show that EB&amp;ndash;LLF charts exhibit high sensitivity for small and moderate process shifts, and both EB approaches outperform the classical c-chart by integrating prior information to detect shifts earlier while controlling false alarms. These findings highlight the importance of loss function choice and demonstrate the effectiveness of EB charts for robust process monitoring.</description>
	<pubDate>2025-12-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 71: A Dynamic Empirical Bayes Signal Model for Attribute Defect Detection</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/71">doi: 10.3390/signals6040071</a></p>
	<p>Authors:
		Yadpirun Supharakonsakun
		</p>
	<p>This study evaluates Empirical Bayes (EB) c-charts for monitoring count-type data under precautionary (PLF) and logarithmic (LLF) loss functions. By assuming an exponential prior for the Poisson mean, the EB framework enables the construction of predictive densities for future observations. Simulation studies and a real-world dataset on missing rivets in large aircraft were used to compare the methods&rsquo; ability to detect out-of-control conditions. The results show that EB&ndash;LLF charts exhibit high sensitivity for small and moderate process shifts, and both EB approaches outperform the classical c-chart by integrating prior information to detect shifts earlier while controlling false alarms. These findings highlight the importance of loss function choice and demonstrate the effectiveness of EB charts for robust process monitoring.</p>
	]]></content:encoded>

	<dc:title>A Dynamic Empirical Bayes Signal Model for Attribute Defect Detection</dc:title>
			<dc:creator>Yadpirun Supharakonsakun</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040071</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>71</prism:startingPage>
		<prism:doi>10.3390/signals6040071</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/71</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/70">

	<title>Signals, Vol. 6, Pages 70: Review of Trends in Wavelets with Possible Maritime Applications</title>
	<link>https://www.mdpi.com/2624-6120/6/4/70</link>
	<description>The wavelet transform (WT) is an integral transform primarily used for processing and analyzing nonstationary signals due to its multiresolution property. Multiresolution analysis is one method that finds applications in many fields because of the characteristics of the transform. Over the years, WT has become standard and is integrated into many coding protocols and applications without special mention. Decades of research in the field of wavelets have revealed several stages of development. In the initial stage, the focus was on wavelet families, with scientists deriving new families for emerging applications. The second stage addressed implementation issues, emphasizing more efficient implementation techniques. The next stage involved artificial neural networks (ANNs) that perform WT. This paper reviews the development of WT with examples from maritime applications. We also provide an overview of cutting-edge trends in wavelets and propose the aforementioned stages as a new taxonomy of WT development.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 70: Review of Trends in Wavelets with Possible Maritime Applications</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/70">doi: 10.3390/signals6040070</a></p>
	<p>Authors:
		Igor Vujović
		Joško Šoda
		Ivana Golub Medvešek
		</p>
	<p>The wavelet transform (WT) is an integral transform primarily used for processing and analyzing nonstationary signals due to its multiresolution property. Multiresolution analysis is one method that finds applications in many fields because of the characteristics of the transform. Over the years, WT has become standard and is integrated into many coding protocols and applications without special mention. Decades of research in the field of wavelets have revealed several stages of development. In the initial stage, the focus was on wavelet families, with scientists deriving new families for emerging applications. The second stage addressed implementation issues, emphasizing more efficient implementation techniques. The next stage involved artificial neural networks (ANNs) that perform WT. This paper reviews the development of WT with examples from maritime applications. We also provide an overview of cutting-edge trends in wavelets and propose the aforementioned stages as a new taxonomy of WT development.</p>
	]]></content:encoded>

	<dc:title>Review of Trends in Wavelets with Possible Maritime Applications</dc:title>
			<dc:creator>Igor Vujović</dc:creator>
			<dc:creator>Joško Šoda</dc:creator>
			<dc:creator>Ivana Golub Medvešek</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040070</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>70</prism:startingPage>
		<prism:doi>10.3390/signals6040070</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/70</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/69">

	<title>Signals, Vol. 6, Pages 69: Discretization of Digital Controllers Comprising Second-Order Notch Filters</title>
	<link>https://www.mdpi.com/2624-6120/6/4/69</link>
	<description>Second-order notch filters (NFs) with constant coefficients are often used as part of feedback controllers in grid-connected power conversion systems to prevent unwanted harmonic content polluting the closed control loops. In practice, the value of the mains frequency resides within a certain known range rather than remaining constant. Hence, the correct selection of NF coefficients is crucial for ensuring that the desired performance is maintained within the whole expected mains frequency range. Bilinear transformation (BLT) with notch frequency prewarping is often adopted to convert an NF from a continuous to a digital form. While accurately preserving the notch frequency location, the method reduces the filter bandwidth. As a remedy, BLT with both notch frequency and damping ratio prewarping may be employed. Nevertheless, some inaccuracy remains under low sampling-to-notch frequency ratios. This technical note demonstrates that the issue may be solved by prewarping the boundary values of the expected harmonic frequency range rather than the notch frequency and/or damping factor before applying the BLT. Simulation results accurately support the presented issue and proposed solution.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 69: Discretization of Digital Controllers Comprising Second-Order Notch Filters</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/69">doi: 10.3390/signals6040069</a></p>
	<p>Authors:
		Alon Kuperman
		</p>
	<p>Second-order notch filters (NFs) with constant coefficients are often used as part of feedback controllers in grid-connected power conversion systems to prevent unwanted harmonic content polluting the closed control loops. In practice, the value of the mains frequency resides within a certain known range rather than remaining constant. Hence, the correct selection of NF coefficients is crucial for ensuring that the desired performance is maintained within the whole expected mains frequency range. Bilinear transformation (BLT) with notch frequency prewarping is often adopted to convert an NF from a continuous to a digital form. While accurately preserving the notch frequency location, the method reduces the filter bandwidth. As a remedy, BLT with both notch frequency and damping ratio prewarping may be employed. Nevertheless, some inaccuracy remains under low sampling-to-notch frequency ratios. This technical note demonstrates that the issue may be solved by prewarping the boundary values of the expected harmonic frequency range rather than the notch frequency and/or damping factor before applying the BLT. Simulation results accurately support the presented issue and proposed solution.</p>
	]]></content:encoded>

	<dc:title>Discretization of Digital Controllers Comprising Second-Order Notch Filters</dc:title>
			<dc:creator>Alon Kuperman</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040069</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Technical Note</prism:section>
	<prism:startingPage>69</prism:startingPage>
		<prism:doi>10.3390/signals6040069</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/69</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/68">

	<title>Signals, Vol. 6, Pages 68: Standardizing EMG Pipelines for Muscle Synergy Analysis: A Large-Scale Evaluation of Filtering, Normalization and Criteria</title>
	<link>https://www.mdpi.com/2624-6120/6/4/68</link>
	<description>Muscle synergies offer valuable insights into the movement strategies employed by the central nervous system and present a promising avenue for clinical applications. However, the field lacks a complete understanding of how surface electromyography processing parameters affect muscle synergy analysis, which in turn has hindered cross-study comparisons and the translation of experimental results to clinical contexts. To address the gap, this study presents a systematic evaluation of interactive effects of three key parameters on muscle synergy analysis, including nine cut-off frequencies of low-pass filters, five normalization methods, and five synergy extraction criteria, covering 225 unique combinations. Using a comprehensive running dataset of 135 subjects, this study examined variance accounted for (VAF) and correlation coefficient (R2) metrics, the number of synergies, and synergy structure consistency under different parameter settings. Synergy similarity was used as a quantitative measure of synergy stability across different parameter settings. The results demonstrated that cut-off frequencies, normalization methods, and criteria choices interactively influenced the outcomes. Notably, VAF consistently yielded higher values than R2, highlighting differences in how these metrics capture explained variance. Error VAF (EVAF) emerged as the most robust criterion for determining the number of synergies, especially when combined with normalization methods by maximum value (MAX), average value (AVE), or unit variance (UVA) and moderately high cut-off frequencies, which led to more stable synergy structures across conditions. Furthermore, the predefined threshold associated with each criterion markedly affected the estimated number of synergies. 
These findings provide structured guidelines for muscle synergy analysis, helping to standardize preprocessing and extraction parameters, improve reproducibility across studies, and enhance the clinical applicability of synergy-based assessments.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 68: Standardizing EMG Pipelines for Muscle Synergy Analysis: A Large-Scale Evaluation of Filtering, Normalization and Criteria</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/68">doi: 10.3390/signals6040068</a></p>
	<p>Authors:
		Kunkun Zhao
		Yaowei Jin
		Yizhou Feng
		Jianqing Li
		Yuxuan Zhou
		</p>
	<p>Muscle synergies offer valuable insights into the movement strategies employed by the central nervous system and present a promising avenue for clinical applications. However, the field lacks a complete understanding of how surface electromyography processing parameters affect muscle synergy analysis, which in turn has hindered cross-study comparisons and the translation of experimental results to clinical contexts. To address the gap, this study presents a systematic evaluation of interactive effects of three key parameters on muscle synergy analysis, including nine cut-off frequencies of low-pass filters, five normalization methods, and five synergy extraction criteria, covering 225 unique combinations. Using a comprehensive running dataset of 135 subjects, this study examined variance accounted for (VAF) and correlation coefficient (R2) metrics, the number of synergies, and synergy structure consistency under different parameter settings. Synergy similarity was used as a quantitative measure of synergy stability across different parameter settings. The results demonstrated that cut-off frequencies, normalization methods, and criteria choices interactively influenced the outcomes. Notably, VAF consistently yielded higher values than R2, highlighting differences in how these metrics capture explained variance. Error VAF (EVAF) emerged as the most robust criterion for determining the number of synergies, especially when combined with normalization methods by maximum value (MAX), average value (AVE), or unit variance (UVA) and moderately high cut-off frequencies, which led to more stable synergy structures across conditions. Furthermore, the predefined threshold associated with each criterion markedly affected the estimated number of synergies. 
These findings provide structured guidelines for muscle synergy analysis, helping to standardize preprocessing and extraction parameters, improve reproducibility across studies, and enhance the clinical applicability of synergy-based assessments.</p>
	]]></content:encoded>

	<dc:title>Standardizing EMG Pipelines for Muscle Synergy Analysis: A Large-Scale Evaluation of Filtering, Normalization and Criteria</dc:title>
			<dc:creator>Kunkun Zhao</dc:creator>
			<dc:creator>Yaowei Jin</dc:creator>
			<dc:creator>Yizhou Feng</dc:creator>
			<dc:creator>Jianqing Li</dc:creator>
			<dc:creator>Yuxuan Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040068</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>68</prism:startingPage>
		<prism:doi>10.3390/signals6040068</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/68</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/67">

	<title>Signals, Vol. 6, Pages 67: Real-Time Physiological Activity and Sleep State Monitoring System Using TS2Vec Embeddings and DBSCAN Clustering for Heart Rate and Motor Response Analysis in IoMT</title>
	<link>https://www.mdpi.com/2624-6120/6/4/67</link>
	<description>Monitoring physiological activity and sleep states in real time is challenging, particularly for continuous assessment in daily life settings using wearable IoMT devices. We developed a 24 h wearable system that integrates electrocardiogram (ECG) electrodes for heart rate measurement and a glove-mounted flex sensor for motor responses, connected through an Internet of Medical Things (IoMT) platform. Flex signals were combined using principal component analysis (PCA) to generate a single kinematic channel, then standardized with heart rate. Time-series windows were embedded using TS2Vec and clustered with DBSCAN, while t-SNE was applied only for visualization. The framework identified four physiologically coherent states: (i) nocturnal sleep with the lowest heart rate and minimal motion, (ii) evening pre-sleep with low movement and moderately higher heart rate, (iii) daytime activity with variable motion and mid-range heart rate, and (iv) late-day high-intensity activity with the highest heart rate and increased motor responses. A few outliers were observed during transient body movements or sensor readjustments, which were identified and excluded during preprocessing to ensure stable clustering results. Across 24 h, heart rate ranged from 52 to 96 bpm (mean 77.4), while flexion spanned 0 to 165&amp;deg; (mean 52.5&amp;deg;), showing alignment between movement intensity and cardiac response. This integrated sensing and analytics pipeline provides an interpretable, subject-specific state map that enables continuous remote monitoring of physiological activity and sleep patterns.</description>
	<pubDate>2025-11-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 67: Real-Time Physiological Activity and Sleep State Monitoring System Using TS2Vec Embeddings and DBSCAN Clustering for Heart Rate and Motor Response Analysis in IoMT</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/67">doi: 10.3390/signals6040067</a></p>
	<p>Authors:
		Arifin Arifin
		Harmiati Harbi
		Andi Silvia Indriani
		Ida Laila
		Bualkar Abdullah
		Alridho
		Irfan Idris
		Jalu Ahmad Prakosa
		</p>
	<p>Monitoring physiological activity and sleep states in real time is challenging, particularly for continuous assessment in daily life settings using wearable IoMT devices. We developed a 24 h wearable system that integrates electrocardiogram (ECG) electrodes for heart rate measurement and a glove-mounted flex sensor for motor responses, connected through an Internet of Medical Things (IoMT) platform. Flex signals were combined using principal component analysis (PCA) to generate a single kinematic channel, then standardized with heart rate. Time-series windows were embedded using TS2Vec and clustered with DBSCAN, while t-SNE was applied only for visualization. The framework identified four physiologically coherent states: (i) nocturnal sleep with the lowest heart rate and minimal motion, (ii) evening pre-sleep with low movement and moderately higher heart rate, (iii) daytime activity with variable motion and mid-range heart rate, and (iv) late-day high-intensity activity with the highest heart rate and increased motor responses. A few outliers were observed during transient body movements or sensor readjustments, which were identified and excluded during preprocessing to ensure stable clustering results. Across 24 h, heart rate ranged from 52 to 96 bpm (mean 77.4), while flexion spanned 0 to 165&deg; (mean 52.5&deg;), showing alignment between movement intensity and cardiac response. This integrated sensing and analytics pipeline provides an interpretable, subject-specific state map that enables continuous remote monitoring of physiological activity and sleep patterns.</p>
	]]></content:encoded>

	<dc:title>Real-Time Physiological Activity and Sleep State Monitoring System Using TS2Vec Embeddings and DBSCAN Clustering for Heart Rate and Motor Response Analysis in IoMT</dc:title>
			<dc:creator>Arifin Arifin</dc:creator>
			<dc:creator>Harmiati Harbi</dc:creator>
			<dc:creator>Andi Silvia Indriani</dc:creator>
			<dc:creator>Ida Laila</dc:creator>
			<dc:creator>Bualkar Abdullah</dc:creator>
			<dc:creator>Alridho</dc:creator>
			<dc:creator>Irfan Idris</dc:creator>
			<dc:creator>Jalu Ahmad Prakosa</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040067</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-17</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-17</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>67</prism:startingPage>
		<prism:doi>10.3390/signals6040067</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/67</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/66">

	<title>Signals, Vol. 6, Pages 66: Radar Foot Gesture Recognition with Hybrid Pruned Lightweight Deep Models</title>
	<link>https://www.mdpi.com/2624-6120/6/4/66</link>
	<description>Foot gesture recognition using a continuous-wave (CW) radar requires implementation on edge hardware with strict latency and memory budgets. Existing structured and unstructured pruning pipelines rely on iterative training&amp;ndash;pruning&amp;ndash;retraining cycles, increasing search costs and making them significantly time-consuming. We propose a NAS-guided bisection hybrid pruning framework on foot gesture recognition from a continuous-wave (CW) radar, which employs a weighted shared supernet encompassing both block and channel options. The method consists of three major steps. In the bisection-guided NAS structured pruning stage, the algorithm identifies the minimum number of retained blocks&amp;mdash;or equivalently, the maximum achievable sparsity&amp;mdash;that satisfies the target accuracy under specified FLOPs and latency constraints. Next, during the hybrid compression phase, a global L1 percentile-based unstructured pruning and channel repacking are applied to further reduce memory usage. Finally, in the low-cost decision protocol stage, each pruning decision is evaluated using short fine-tuning (1&amp;ndash;3 epochs) and partial validation (10&amp;ndash;30% of dataset) to avoid repeated full retraining. We further provide a unified theory for hybrid pruning&amp;mdash;formulating a resource-aware objective, a logit-perturbation invariance bound for unstructured pruning/INT8/repacking, a Hoeffding-based bisection decision margin, and a compression (code-length) generalization bound&amp;mdash;explaining when the compressed models match baseline accuracy while meeting edge budgets. Radar return signals are processed with a short-time Fourier transform (STFT) to generate unique time&amp;ndash;frequency spectrograms for each gesture (kick, swing, slide, tap). 
The proposed pruning method achieves 20&amp;ndash;57% reductions in floating-point operations (FLOPs) and approximately 86% reductions in parameters, while preserving equivalent recognition accuracy. Experimental results demonstrate that the pruned model maintains high gesture recognition performance with substantially lower computational cost, making it suitable for real-time deployment on edge devices.</description>
	<pubDate>2025-11-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 66: Radar Foot Gesture Recognition with Hybrid Pruned Lightweight Deep Models</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/66">doi: 10.3390/signals6040066</a></p>
	<p>Authors:
		Eungang Son
		Seungeon Song
		Bong-Seok Kim
		Sangdong Kim
		Jonghun Lee
		</p>
	<p>Foot gesture recognition using a continuous-wave (CW) radar requires implementation on edge hardware with strict latency and memory budgets. Existing structured and unstructured pruning pipelines rely on iterative training&amp;amp;ndash;pruning&amp;amp;ndash;retraining cycles, increasing search costs and making them significantly time-consuming. We propose a NAS-guided bisection hybrid pruning framework on foot gesture recognition from a continuous-wave (CW) radar, which employs a weighted shared supernet encompassing both block and channel options. The method consists of three major steps. In the bisection-guided NAS structured pruning stage, the algorithm identifies the minimum number of retained blocks&amp;amp;mdash;or equivalently, the maximum achievable sparsity&amp;amp;mdash;that satisfies the target accuracy under specified FLOPs and latency constraints. Next, during the hybrid compression phase, a global L1 percentile-based unstructured pruning and channel repacking are applied to further reduce memory usage. Finally, in the low-cost decision protocol stage, each pruning decision is evaluated using short fine-tuning (1&amp;amp;ndash;3 epochs) and partial validation (10&amp;amp;ndash;30% of dataset) to avoid repeated full retraining. We further provide a unified theory for hybrid pruning&amp;amp;mdash;formulating a resource-aware objective, a logit-perturbation invariance bound for unstructured pruning/INT8/repacking, a Hoeffding-based bisection decision margin, and a compression (code-length) generalization bound&amp;amp;mdash;explaining when the compressed models match baseline accuracy while meeting edge budgets. Radar return signals are processed with a short-time Fourier transform (STFT) to generate unique time&amp;amp;ndash;frequency spectrograms for each gesture (kick, swing, slide, tap). 
The proposed pruning method achieves 20&amp;amp;ndash;57% reductions in floating-point operations (FLOPs) and approximately 86% reductions in parameters, while preserving equivalent recognition accuracy. Experimental results demonstrate that the pruned model maintains high gesture recognition performance with substantially lower computational cost, making it suitable for real-time deployment on edge devices.</p>
	]]></content:encoded>

	<dc:title>Radar Foot Gesture Recognition with Hybrid Pruned Lightweight Deep Models</dc:title>
			<dc:creator>Eungang Son</dc:creator>
			<dc:creator>Seungeon Song</dc:creator>
			<dc:creator>Bong-Seok Kim</dc:creator>
			<dc:creator>Sangdong Kim</dc:creator>
			<dc:creator>Jonghun Lee</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040066</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-13</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-13</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>66</prism:startingPage>
		<prism:doi>10.3390/signals6040066</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/66</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/65">

	<title>Signals, Vol. 6, Pages 65: Universal Digital Calibration of Mismatched DACs: Enabling Sub-0.02 mm2 Area with Redundancy and Segmented Correction</title>
	<link>https://www.mdpi.com/2624-6120/6/4/65</link>
	<description>This paper presents a novel methodology for the design and calibration of ultra-compact digital-to-analog converters (DACs), integrating architectural redundancy and a digital calibration algorithm. The proposed calibration approach generates pre-distortion codes that correct both positive and negative nonlinearity errors, even in designs with severe mismatch or relaxed layout constraints. This enables the use of aggressively scaled devices while maintaining high linearity and spectral fidelity. The algorithm is architecture-agnostic and compatible with resistor-string, current-steering, and hybrid DAC structures. It operates with minimal memory, low latency, and supports both foreground and background calibration modes. The method is validated through simulation and silicon measurement of three 14-bit DAC architectures fabricated in TSMC 180 nm CMOS. Post-calibration results demonstrate linearity within &amp;plusmn;0.5&amp;ndash;1.2 LSB, ENOB up to 13.8 bits, and significant improvements in SNR, SFDR, and THD. The compact layouts&amp;mdash;occupying as little as 0.0169 mm2&amp;mdash;highlight the scalability of the proposed method for applications such as analog AI accelerators and high-density mixed-signal SoCs.</description>
	<pubDate>2025-11-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 65: Universal Digital Calibration of Mismatched DACs: Enabling Sub-0.02 mm2 Area with Redundancy and Segmented Correction</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/65">doi: 10.3390/signals6040065</a></p>
	<p>Authors:
		Ekaniyere Oko-Odion
		Isaac Bruce
		Emmanuel Nti Darko
		Matthew Crabb
		Degang Chen
		</p>
	<p>This paper presents a novel methodology for the design and calibration of ultra-compact digital-to-analog converters (DACs), integrating architectural redundancy and a digital calibration algorithm. The proposed calibration approach generates pre-distortion codes that correct both positive and negative nonlinearity errors, even in designs with severe mismatch or relaxed layout constraints. This enables the use of aggressively scaled devices while maintaining high linearity and spectral fidelity. The algorithm is architecture-agnostic and compatible with resistor-string, current-steering, and hybrid DAC structures. It operates with minimal memory, low latency, and supports both foreground and background calibration modes. The method is validated through simulation and silicon measurement of three 14-bit DAC architectures fabricated in TSMC 180 nm CMOS. Post-calibration results demonstrate linearity within &amp;amp;plusmn;0.5&amp;amp;ndash;1.2 LSB, ENOB up to 13.8 bits, and significant improvements in SNR, SFDR, and THD. The compact layouts&amp;amp;mdash;occupying as little as 0.0169 mm2&amp;amp;mdash;highlight the scalability of the proposed method for applications such as analog AI accelerators and high-density mixed-signal SoCs.</p>
	]]></content:encoded>

	<dc:title>Universal Digital Calibration of Mismatched DACs: Enabling Sub-0.02 mm2 Area with Redundancy and Segmented Correction</dc:title>
			<dc:creator>Ekaniyere Oko-Odion</dc:creator>
			<dc:creator>Isaac Bruce</dc:creator>
			<dc:creator>Emmanuel Nti Darko</dc:creator>
			<dc:creator>Matthew Crabb</dc:creator>
			<dc:creator>Degang Chen</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040065</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-12</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-12</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>65</prism:startingPage>
		<prism:doi>10.3390/signals6040065</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/65</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/64">

	<title>Signals, Vol. 6, Pages 64: Augmented Gait Classification: Integrating YOLO, CNN&amp;ndash;SNN Hybridization, and GAN Synthesis for Knee Osteoarthritis and Parkinson&amp;rsquo;s Disease</title>
	<link>https://www.mdpi.com/2624-6120/6/4/64</link>
	<description>We propose a novel hybrid deep learning framework that synergistically integrates Convolutional Neural Networks (CNNs), Spiking Neural Networks (SNNs), and Generative Adversarial Networks (GANs) for robust and accurate classification of high-resolution frontal and sagittal human gait video sequences&amp;amp;mdash;capturing both lower-limb kinematics and upper-body posture&amp;amp;mdash;from subjects with Knee Osteoarthritis (KOA), Parkinson&amp;amp;rsquo;s Disease (PD), and healthy Normal (NM) controls, classified into three disease-type categories. Our approach first employs a tailored CNN backbone to extract rich spatial features from fixed-length clips (e.g., 16 frames resized to 128 &amp;amp;times; 128 px), which are then temporally encoded and processed by an SNN layer to capture dynamic gait patterns. To address class imbalance and enhance generalization, a conditional GAN augments rare severity classes with realistic synthetic gait sequences. Evaluated on the controlled, marker-based KOA-PD-NM laboratory public dataset, our model achieves an overall accuracy of 99.47%, a sensitivity of 98.4%, a specificity of 99.0%, and an F1-score of 98.6%, outperforming baseline CNN, SNN, and CNN&amp;amp;ndash;SNN configurations by over 2.5% in accuracy and 3.1% in F1-score. Ablation studies confirm that GAN-based augmentation yields a 1.9% accuracy gain, while the SNN layer provides critical temporal robustness. Our findings demonstrate that this CNN&amp;amp;ndash;SNN&amp;amp;ndash;GAN paradigm offers a powerful, computationally efficient solution for high-precision, gait-based disease classification, achieving a 48.4% reduction in FLOPs (1.82 GFLOPs to 0.94 GFLOPs) and 9.2% lower average power consumption (68.4 W to 62.1 W) on Kaggle P100 GPU compared to CNN-only baselines. 
The hybrid model demonstrates significant potential for energy savings on neuromorphic hardware, with an estimated 13.2% reduction in energy per inference based on FLOP-based analysis, positioning it favorably for deployment in resource-constrained clinical environments and edge computing scenarios.</description>
	<pubDate>2025-11-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 64: Augmented Gait Classification: Integrating YOLO, CNN&amp;ndash;SNN Hybridization, and GAN Synthesis for Knee Osteoarthritis and Parkinson&amp;rsquo;s Disease</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/64">doi: 10.3390/signals6040064</a></p>
	<p>Authors:
		Houmem Slimi
		Ala Balti
		Mounir Sayadi
		Mohamed Moncef Ben Khelifa
		</p>
	<p>We propose a novel hybrid deep learning framework that synergistically integrates Convolutional Neural Networks (CNNs), Spiking Neural Networks (SNNs), and Generative Adversarial Networks (GANs) for robust and accurate classification of high-resolution frontal and sagittal human gait video sequences&amp;amp;mdash;capturing both lower-limb kinematics and upper-body posture&amp;amp;mdash;from subjects with Knee Osteoarthritis (KOA), Parkinson&amp;amp;rsquo;s Disease (PD), and healthy Normal (NM) controls, classified into three disease-type categories. Our approach first employs a tailored CNN backbone to extract rich spatial features from fixed-length clips (e.g., 16 frames resized to 128 &amp;amp;times; 128 px), which are then temporally encoded and processed by an SNN layer to capture dynamic gait patterns. To address class imbalance and enhance generalization, a conditional GAN augments rare severity classes with realistic synthetic gait sequences. Evaluated on the controlled, marker-based KOA-PD-NM laboratory public dataset, our model achieves an overall accuracy of 99.47%, a sensitivity of 98.4%, a specificity of 99.0%, and an F1-score of 98.6%, outperforming baseline CNN, SNN, and CNN&amp;amp;ndash;SNN configurations by over 2.5% in accuracy and 3.1% in F1-score. Ablation studies confirm that GAN-based augmentation yields a 1.9% accuracy gain, while the SNN layer provides critical temporal robustness. Our findings demonstrate that this CNN&amp;amp;ndash;SNN&amp;amp;ndash;GAN paradigm offers a powerful, computationally efficient solution for high-precision, gait-based disease classification, achieving a 48.4% reduction in FLOPs (1.82 GFLOPs to 0.94 GFLOPs) and 9.2% lower average power consumption (68.4 W to 62.1 W) on Kaggle P100 GPU compared to CNN-only baselines. 
The hybrid model demonstrates significant potential for energy savings on neuromorphic hardware, with an estimated 13.2% reduction in energy per inference based on FLOP-based analysis, positioning it favorably for deployment in resource-constrained clinical environments and edge computing scenarios.</p>
	]]></content:encoded>

	<dc:title>Augmented Gait Classification: Integrating YOLO, CNN&amp;ndash;SNN Hybridization, and GAN Synthesis for Knee Osteoarthritis and Parkinson&amp;rsquo;s Disease</dc:title>
			<dc:creator>Houmem Slimi</dc:creator>
			<dc:creator>Ala Balti</dc:creator>
			<dc:creator>Mounir Sayadi</dc:creator>
			<dc:creator>Mohamed Moncef Ben Khelifa</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040064</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-07</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-07</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>64</prism:startingPage>
		<prism:doi>10.3390/signals6040064</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/64</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/63">

	<title>Signals, Vol. 6, Pages 63: Research on Small Dataset Object Detection Algorithm Based on Hierarchically Deployed Attention Mechanisms</title>
	<link>https://www.mdpi.com/2624-6120/6/4/63</link>
	<description>To address the demand for lightweight, high-precision, real-time, and low-computation detection of targets with limited samples&amp;mdash;such as laboratory instruments in portable AR devices&amp;mdash;this paper proposes a small dataset object detection algorithm based on a hierarchically deployed attention mechanism. The algorithm adopts Rep-YOLOv8 as its backbone. First, an ECA channel attention mechanism is incorporated into the backbone network to extract image features and adaptively adjust channel weights, improving performance with only a minor increase in parameters. Second, a CBAM-spatial module is integrated to enhance region-specific features for small dataset objects, highlighting target characteristics and suppressing irrelevant background noise. Then, in the neck network, the SE attention module is replaced with an eSE attention module to prevent channel information loss caused by dimensional changes. Experiments conducted on both open-source and self-constructed small datasets show that the proposed hierarchical Rep-YOLOv8 model effectively meets the requirements of lightweight design, real-time processing, high accuracy, and low computational cost. On the self-built small dataset, the model achieves a mAP@0.5 of 0.971 across 17 categories, outperforming the baseline Rep-YOLOv8 (0.871) by 11.5%, demonstrating effective recognition and segmentation capability for small dataset objects.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 63: Research on Small Dataset Object Detection Algorithm Based on Hierarchically Deployed Attention Mechanisms</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/63">doi: 10.3390/signals6040063</a></p>
	<p>Authors:
		Yonggang Zhao
		Jiongming Lu
		Jixia Xu
		Jiechu Miu
		Hangbo Hua
		</p>
	<p>To address the demand for lightweight, high-precision, real-time, and low-computation detection of targets with limited samples&amp;amp;mdash;such as laboratory instruments in portable AR devices&amp;amp;mdash;this paper proposes a small dataset object detection algorithm based on a hierarchically deployed attention mechanism. The algorithm adopts Rep-YOLOv8 as its backbone. First, an ECA channel attention mechanism is incorporated into the backbone network to extract image features and adaptively adjust channel weights, improving performance with only a minor increase in parameters. Second, a CBAM-spatial module is integrated to enhance region-specific features for small dataset objects, highlighting target characteristics and suppressing irrelevant background noise. Then, in the neck network, the SE attention module is replaced with an eSE attention module to prevent channel information loss caused by dimensional changes. Experiments conducted on both open-source and self-constructed small datasets show that the proposed hierarchical Rep-YOLOv8 model effectively meets the requirements of lightweight design, real-time processing, high accuracy, and low computational cost. On the self-built small dataset, the model achieves a mAP@0.5 of 0.971 across 17 categories, outperforming the baseline Rep-YOLOv8 (0.871) by 11.5%, demonstrating effective recognition and segmentation capability for small dataset objects.</p>
	]]></content:encoded>

	<dc:title>Research on Small Dataset Object Detection Algorithm Based on Hierarchically Deployed Attention Mechanisms</dc:title>
			<dc:creator>Yonggang Zhao</dc:creator>
			<dc:creator>Jiongming Lu</dc:creator>
			<dc:creator>Jixia Xu</dc:creator>
			<dc:creator>Jiechu Miu</dc:creator>
			<dc:creator>Hangbo Hua</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040063</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>63</prism:startingPage>
		<prism:doi>10.3390/signals6040063</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/63</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/62">

	<title>Signals, Vol. 6, Pages 62: Explainable AI-Based Clinical Signal Analysis for Myocardial Infarction Classification and Risk Factor Interpretation</title>
	<link>https://www.mdpi.com/2624-6120/6/4/62</link>
	<description>Myocardial infarction (MI) remains one of the most critical causes of death worldwide, demanding predictive models that balance accuracy with clinical interpretability. This study introduces an explainable artificial intelligence (XAI) framework that integrates least absolute shrinkage and selection operator (LASSO) regression for feature selection, logistic regression for prediction, and Shapley additive explanations (SHAP) for interpretability. Using a dataset of 918 patients and 12 signal-derived clinical variables, the model achieved an accuracy of 87.7%, a recall of 0.87, and an F1 score of 0.89, confirming its robust performance. The key risk factors identified were age, fasting blood sugar, ST depression, flat ST slope, and exercise-induced angina, while the maximum heart rate and upward ST slope served as protective factors. Comparative analyses showed that the SHAP and p-value methods largely aligned, consistently highlighting ST_Slope_Flat and ExerciseAngina_Y, though discrepancies emerged for ST_Slope_Up, which showed limited statistical significance but high SHAP contribution. By combining predictive strength with transparent interpretation, this study addresses the black-box limitations of conventional models and offers actionable insights for clinicians. The findings highlight the potential of signal-driven XAI approaches to improve early detection and patient-centered prevention of MI. Future work should validate these models on larger and more diverse datasets to enhance generalizability and clinical adoption.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 62: Explainable AI-Based Clinical Signal Analysis for Myocardial Infarction Classification and Risk Factor Interpretation</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/62">doi: 10.3390/signals6040062</a></p>
	<p>Authors:
		Ji-Yeong Jang
		Ji-Na Lee
		Ji-Hye Park
		Ji-Yeoun Lee
		</p>
	<p>Myocardial infarction (MI) remains one of the most critical causes of death worldwide, demanding predictive models that balance accuracy with clinical interpretability. This study introduces an explainable artificial intelligence (XAI) framework that integrates least absolute shrinkage and selection operator (LASSO) regression for feature selection, logistic regression for prediction, and Shapley additive explanations (SHAP) for interpretability. Using a dataset of 918 patients and 12 signal-derived clinical variables, the model achieved an accuracy of 87.7%, a recall of 0.87, and an F1 score of 0.89, confirming its robust performance. The key risk factors identified were age, fasting blood sugar, ST depression, flat ST slope, and exercise-induced angina, while the maximum heart rate and upward ST slope served as protective factors. Comparative analyses showed that the SHAP and p-value methods largely aligned, consistently highlighting ST_Slope_Flat and ExerciseAngina_Y, though discrepancies emerged for ST_Slope_Up, which showed limited statistical significance but high SHAP contribution. By combining predictive strength with transparent interpretation, this study addresses the black-box limitations of conventional models and offers actionable insights for clinicians. The findings highlight the potential of signal-driven XAI approaches to improve early detection and patient-centered prevention of MI. Future work should validate these models on larger and more diverse datasets to enhance generalizability and clinical adoption.</p>
	]]></content:encoded>

	<dc:title>Explainable AI-Based Clinical Signal Analysis for Myocardial Infarction Classification and Risk Factor Interpretation</dc:title>
			<dc:creator>Ji-Yeong Jang</dc:creator>
			<dc:creator>Ji-Na Lee</dc:creator>
			<dc:creator>Ji-Hye Park</dc:creator>
			<dc:creator>Ji-Yeoun Lee</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040062</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>62</prism:startingPage>
		<prism:doi>10.3390/signals6040062</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/62</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/61">

	<title>Signals, Vol. 6, Pages 61: Non-Invasive Techniques for fECG Analysis in Fetal Heart Monitoring: A Systematic Review</title>
	<link>https://www.mdpi.com/2624-6120/6/4/61</link>
	<description>An electrocardiogram (ECG) is a vital diagnostic tool that provides crucial insights into the heart rate, cardiac positioning, origin of electrical potentials, propagation of depolarization waves, and the identification of rhythm and conduction irregularities. Analysis of ECG is essential, especially during pregnancy, where monitoring fetal health is critical. Fetal electrocardiography (fECG) has emerged as a significant modality for evaluating the developmental status and well-being of the fetal heart throughout gestation, facilitating early detection of congenital heart diseases (CHDs) and other cardiac abnormalities. Typically, fECG signals are acquired non-invasively through electrodes placed on the maternal abdomen, which reduces risk and enhances user convenience. However, these signals are often contaminated via various sources, including maternal electrocardiogram (mECG), electromagnetic interference from power lines, baseline drift, motion artifacts, uterine contractions, and high-frequency noise. Such disturbances impair signal fidelity and threaten diagnostic accuracy. This scoping review adhering to PRISMA-ScR guidelines aims to highlight the methods for signal acquisition, existing databases for validation, and a range of algorithms proposed by researchers for improving the quality of fECG. A comprehensive examination of 157,000 uniquely identified publications from Google Scholar, PubMed, and Web of Science has resulted in the selection of 6210 records through a systematic screening of titles, abstracts, and keywords. Subsequently, 141 full-text articles were considered eligible for inclusion in this study (from 1950 to 2026). By critically evaluating established techniques in the current literature, a strategy is proposed for analyzing fECG and calculating heart rate variability (HRV) for identifying fetal heart-related abnormalities. 
Advances in these methodologies could significantly aid in the diagnosis of fetal heart diseases, assisting timely clinical interventions and prevention.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 61: Non-Invasive Techniques for fECG Analysis in Fetal Heart Monitoring: A Systematic Review</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/61">doi: 10.3390/signals6040061</a></p>
	<p>Authors:
		Sanghamitra Subhadarsini Dash
		Malaya Kumar Nath
		</p>
	<p>An electrocardiogram (ECG) is a vital diagnostic tool that provides crucial insights into the heart rate, cardiac positioning, origin of electrical potentials, propagation of depolarization waves, and the identification of rhythm and conduction irregularities. Analysis of ECG is essential, especially during pregnancy, where monitoring fetal health is critical. Fetal electrocardiography (fECG) has emerged as a significant modality for evaluating the developmental status and well-being of the fetal heart throughout gestation, facilitating early detection of congenital heart diseases (CHDs) and other cardiac abnormalities. Typically, fECG signals are acquired non-invasively through electrodes placed on the maternal abdomen, which reduces risk and enhances user convenience. However, these signals are often contaminated via various sources, including maternal electrocardiogram (mECG), electromagnetic interference from power lines, baseline drift, motion artifacts, uterine contractions, and high-frequency noise. Such disturbances impair signal fidelity and threaten diagnostic accuracy. This scoping review adhering to PRISMA-ScR guidelines aims to highlight the methods for signal acquisition, existing databases for validation, and a range of algorithms proposed by researchers for improving the quality of fECG. A comprehensive examination of 157,000 uniquely identified publications from Google Scholar, PubMed, and Web of Science have resulted in the selection of 6210 records through a systematic screening of titles, abstracts, and keywords. Subsequently, 141 full-text articles were considered eligible for inclusion in this study (from 1950 to 2026). By critically evaluating established techniques in the current literature, a strategy is proposed for analyzing fECG and calculating heart rate variability (HRV) for identifying fetal heart-related abnormalities. 
Advances in these methodologies could significantly aid in the diagnosis of fetal heart diseases, assisting timely clinical interventions and prevention.</p>
	]]></content:encoded>

	<dc:title>Non-Invasive Techniques for fECG Analysis in Fetal Heart Monitoring: A Systematic Review</dc:title>
			<dc:creator>Sanghamitra Subhadarsini Dash</dc:creator>
			<dc:creator>Malaya Kumar Nath</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040061</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>61</prism:startingPage>
		<prism:doi>10.3390/signals6040061</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/61</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/60">

	<title>Signals, Vol. 6, Pages 60: Smoke Detection on the Edge: A Comparative Study of YOLO Algorithm Variants</title>
	<link>https://www.mdpi.com/2624-6120/6/4/60</link>
	<description>The early detection of smoke signals due to wildfires is vital in containing the extent of loss and reducing response time, particularly in inaccessible or forested areas. For lightweight object detection, this study contrasts the YOLOv9-tiny, YOLOv10-nano, YOLOv11-nano, YOLOv12-nano, and YOLOv13-nano algorithms in determining wildfire smoke at extended ranges. We present a robustness- and generalization-checking five-fold cross-validation. This study is also the first of its kind to train and publicly benchmark YOLOv10-nano up to YOLOv13-nano on the given dataset. We investigate and compare the detection performance against the standard performance metrics of precision, recall, F1-score, and mAP50, as well as the performance metrics regarding computational efficiency, including the training and testing time. Our results offer practical implications regarding the trade-off between pre-processing methods and model architectures for smoke detection when applied in real time on ground-based cameras installed on mountains and other high-risk fire locations. The investigation presented in this work provides a model in which implementations of lightweight deep learning models for wildfire early-warning systems can be achieved.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 60: Smoke Detection on the Edge: A Comparative Study of YOLO Algorithm Variants</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/60">doi: 10.3390/signals6040060</a></p>
	<p>Authors:
		Iosif Polenakis
		Christos Sarantidis
		Ioannis Karydis
		Markos Avlonitis
		</p>
	<p>The early detection of smoke signals due to wildfires is vital in containing the extent of loss and reducing response time, particularly in inaccessible or forested areas. For lightweight object detection, this study contrasts the YOLOv9-tiny, YOLOv10-nano, YOLOv11-nano, YOLOv12-nano, and YOLOv13-nano algorithms in determining wildfire smoke at extended ranges. We present a robustness- and generalization-checking five-fold cross-validation. This study is also the first of its kind to train and publicly benchmark YOLOv10-nano up to YOLOv13-nano on the given dataset. We investigate and compare the detection performance against the standard performance metrics of precision, recall, F1-score, and mAP50, as well as the performance metrics regarding computational efficiency, including the training and testing time. Our results offer practical implications regarding the trade-off between pre-processing methods and model architectures for smoke detection when applied in real time on ground-based cameras installed on mountains and other high-risk fire locations. The investigation presented in this work provides a model in which implementations of lightweight deep learning models for wildfire early-warning systems can be achieved.</p>
	]]></content:encoded>

	<dc:title>Smoke Detection on the Edge: A Comparative Study of YOLO Algorithm Variants</dc:title>
			<dc:creator>Iosif Polenakis</dc:creator>
			<dc:creator>Christos Sarantidis</dc:creator>
			<dc:creator>Ioannis Karydis</dc:creator>
			<dc:creator>Markos Avlonitis</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040060</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>60</prism:startingPage>
		<prism:doi>10.3390/signals6040060</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/60</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/59">

	<title>Signals, Vol. 6, Pages 59: Why Partitioning Matters: Revealing Overestimated Performance in WiFi-CSI-Based Human Action Recognition</title>
	<link>https://www.mdpi.com/2624-6120/6/4/59</link>
	<description>Human action recognition (HAR) based on WiFi channel state information (CSI) has attracted growing attention due to its contactless, privacy-preserving, and cost-effective nature. Recent studies have reported promising results by leveraging deep learning and image-based representations of CSI. However, methodological flaws in experimental protocols, particularly improper dataset partitioning, can lead to data leakage and significantly overestimate model performance. In this paper, we critically analyze a recently published WiFi-CSI-based HAR approach that converts CSI measurements into images and applies deep learning for classification. We show that the original evaluation relied on random data splitting without subject separation, causing substantial data leakage and inflated results. To address this, we reimplemented the method using subject-independent partitioning, which provides a realistic assessment of generalization ability. Furthermore, we conduct a quantitative study of post-training quantization under both correct and flawed partitioning strategies, revealing that methodological errors can conceal the true performance degradation of compressed models. Our findings demonstrate that evaluation protocols strongly influence reported outcomes, not only for baseline models but also for engineering decisions regarding model optimization and deployment. Based on these insights, we provide guidelines for designing robust experimental protocols in WiFi-CSI-based HAR to ensure methodological integrity and reproducibility.</description>
	<pubDate>2025-10-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 59: Why Partitioning Matters: Revealing Overestimated Performance in WiFi-CSI-Based Human Action Recognition</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/59">doi: 10.3390/signals6040059</a></p>
	<p>Authors:
		Domonkos Varga
		An Quynh Cao
		</p>
	<p>Human action recognition (HAR) based on WiFi channel state information (CSI) has attracted growing attention due to its contactless, privacy-preserving, and cost-effective nature. Recent studies have reported promising results by leveraging deep learning and image-based representations of CSI. However, methodological flaws in experimental protocols, particularly improper dataset partitioning, can lead to data leakage and significantly overestimate model performance. In this paper, we critically analyze a recently published WiFi-CSI-based HAR approach that converts CSI measurements into images and applies deep learning for classification. We show that the original evaluation relied on random data splitting without subject separation, causing substantial data leakage and inflated results. To address this, we reimplemented the method using subject-independent partitioning, which provides a realistic assessment of generalization ability. Furthermore, we conduct a quantitative study of post-training quantization under both correct and flawed partitioning strategies, revealing that methodological errors can conceal the true performance degradation of compressed models. Our findings demonstrate that evaluation protocols strongly influence reported outcomes, not only for baseline models but also for engineering decisions regarding model optimization and deployment. Based on these insights, we provide guidelines for designing robust experimental protocols in WiFi-CSI-based HAR to ensure methodological integrity and reproducibility.</p>
	]]></content:encoded>

	<dc:title>Why Partitioning Matters: Revealing Overestimated Performance in WiFi-CSI-Based Human Action Recognition</dc:title>
			<dc:creator>Domonkos Varga</dc:creator>
			<dc:creator>An Quynh Cao</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040059</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-26</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-26</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>59</prism:startingPage>
		<prism:doi>10.3390/signals6040059</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/59</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/58">

	<title>Signals, Vol. 6, Pages 58: Analyzing Shortwave Propagation with a Remote Accessible Software-Defined Ham Radio System</title>
	<link>https://www.mdpi.com/2624-6120/6/4/58</link>
	<description>Ham radio has long been a foundational area of practice in electrical engineering. Advances in signal processing, particularly the advent of software-defined radio (SDR), have revolutionized the field, offering new possibilities and modes of operation. This paper introduces a system designed for long-term collection of shortwave propagation data, leveraging SDR technology. It also presents the analysis of the collected data, demonstrating the system&amp;rsquo;s potential for advancing research in radio wave propagation.</description>
	<pubDate>2025-10-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 58: Analyzing Shortwave Propagation with a Remote Accessible Software-Defined Ham Radio System</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/58">doi: 10.3390/signals6040058</a></p>
	<p>Authors:
		Gergely Vakulya
		Helga Anna Albert-Huszár
		</p>
	<p>Ham radio has long been a foundational area of practice in electrical engineering. Advances in signal processing, particularly the advent of software-defined radio (SDR), have revolutionized the field, offering new possibilities and modes of operation. This paper introduces a system designed for long-term collection of shortwave propagation data, leveraging SDR technology. It also presents the analysis of the collected data, demonstrating the system&amp;rsquo;s potential for advancing research in radio wave propagation.</p>
	]]></content:encoded>

	<dc:title>Analyzing Shortwave Propagation with a Remote Accessible Software-Defined Ham Radio System</dc:title>
			<dc:creator>Gergely Vakulya</dc:creator>
			<dc:creator>Helga Anna Albert-Huszár</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040058</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-26</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-26</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>58</prism:startingPage>
		<prism:doi>10.3390/signals6040058</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/58</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/57">

	<title>Signals, Vol. 6, Pages 57: ROC Calculation for Burst Traffic Packet Detection&amp;mdash;An Old Problem, Newly Revised</title>
	<link>https://www.mdpi.com/2624-6120/6/4/57</link>
	<description>Burst traffic radio systems use short signal bursts, which are prepended with an a priori known preamble sequence. The burst receivers exploit these preamble sequences for burst start detection. The process of burst start detection is commonly known as Packet Detection (PD), which employs preamble sequence cross-correlation and threshold detection. One major figure of merit for PD performance is the so-called ROC&amp;mdash;receiver operating characteristics. ROC describes the trade-off between the probability of missed detection vs. the probability of false alarm. This article describes how to calculate the ROC for specified preamble sequences by deriving the probability density function (PDF) of the cross-correlation metric. We address this long-standing problem in the context of LEO (low Earth orbit) satellite systems, where differentially modulated PN (pseudo-noise) sequences are used for packet detection. For this particular class of preamble signals, the standard Ricean PDF assumption no longer holds and needs to be revised accordingly within this article.</description>
	<pubDate>2025-10-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 57: ROC Calculation for Burst Traffic Packet Detection&amp;mdash;An Old Problem, Newly Revised</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/57">doi: 10.3390/signals6040057</a></p>
	<p>Authors:
		Marco Krondorf
		</p>
	<p>Burst traffic radio systems use short signal bursts, which are prepended with an a priori known preamble sequence. The burst receivers exploit these preamble sequences for burst start detection. The process of burst start detection is commonly known as Packet Detection (PD), which employs preamble sequence cross-correlation and threshold detection. One major figure of merit for PD performance is the so-called ROC&amp;mdash;receiver operating characteristics. ROC describes the trade-off between the probability of missed detection vs. the probability of false alarm. This article describes how to calculate the ROC for specified preamble sequences by deriving the probability density function (PDF) of the cross-correlation metric. We address this long-standing problem in the context of LEO (low Earth orbit) satellite systems, where differentially modulated PN (pseudo-noise) sequences are used for packet detection. For this particular class of preamble signals, the standard Ricean PDF assumption no longer holds and needs to be revised accordingly within this article.</p>
	]]></content:encoded>

	<dc:title>ROC Calculation for Burst Traffic Packet Detection&amp;mdash;An Old Problem, Newly Revised</dc:title>
			<dc:creator>Marco Krondorf</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040057</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-23</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-23</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>57</prism:startingPage>
		<prism:doi>10.3390/signals6040057</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/57</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/56">

	<title>Signals, Vol. 6, Pages 56: A Fuzzy Model for Predicting the Group and Phase Velocities of Circumferential Waves Based on Subtractive Clustering</title>
	<link>https://www.mdpi.com/2624-6120/6/4/56</link>
	<description>Acoustic scattering is a highly effective tool for non-destructive control and structural analysis. In many real-world applications, understanding acoustic scattering is essential for accurately detecting and characterizing defects, assessing material properties, and evaluating structural integrity without causing damage. One of the most critical aspects of characterizing targets&amp;mdash;such as plates, cylinders, and tubes immersed in water&amp;mdash;is the analysis of the phase and group velocities of antisymmetric circumferential waves (A1). Phase velocity helps identify and characterize wave modes, while group velocity allows for tracking energy, detecting, and locating anomalies. Together, they are essential for monitoring and diagnosing cylindrical shells. This research employs a Sugeno fuzzy inference system (SFIS) combined with a Fuzzy Subtractive Clustering (FSC) identification technique to predict the velocities of antisymmetric (A1) circumferential signals propagating around an infinitely long cylindrical shell of different b/a radius ratios, where a is the outer radius, and b is the inner radius. These circumferential waves are generated when the shell is excited perpendicularly to its axis by a plane wave. Phase and group velocities are determined by using resonance eigenmode theory, and these results are used as training and testing data for the fuzzy model. The proposed approach demonstrates high accuracy in modeling and predicting the behavior of these circumferential waves. The fuzzy model&amp;rsquo;s predictions show excellent agreement with the theoretical results, as confirmed by multiple error metrics, including the Mean Absolute Error (MAE), Standard Error (SE), and Mean Relative Error (MRE).</description>
	<pubDate>2025-10-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 56: A Fuzzy Model for Predicting the Group and Phase Velocities of Circumferential Waves Based on Subtractive Clustering</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/56">doi: 10.3390/signals6040056</a></p>
	<p>Authors:
		Youssef Nahraoui
		El Houcein Aassif
		Samir Elouaham
		Boujemaa Nassiri
		</p>
	<p>Acoustic scattering is a highly effective tool for non-destructive control and structural analysis. In many real-world applications, understanding acoustic scattering is essential for accurately detecting and characterizing defects, assessing material properties, and evaluating structural integrity without causing damage. One of the most critical aspects of characterizing targets&amp;mdash;such as plates, cylinders, and tubes immersed in water&amp;mdash;is the analysis of the phase and group velocities of antisymmetric circumferential waves (A1). Phase velocity helps identify and characterize wave modes, while group velocity allows for tracking energy, detecting, and locating anomalies. Together, they are essential for monitoring and diagnosing cylindrical shells. This research employs a Sugeno fuzzy inference system (SFIS) combined with a Fuzzy Subtractive Clustering (FSC) identification technique to predict the velocities of antisymmetric (A1) circumferential signals propagating around an infinitely long cylindrical shell of different b/a radius ratios, where a is the outer radius, and b is the inner radius. These circumferential waves are generated when the shell is excited perpendicularly to its axis by a plane wave. Phase and group velocities are determined by using resonance eigenmode theory, and these results are used as training and testing data for the fuzzy model. The proposed approach demonstrates high accuracy in modeling and predicting the behavior of these circumferential waves. The fuzzy model&amp;rsquo;s predictions show excellent agreement with the theoretical results, as confirmed by multiple error metrics, including the Mean Absolute Error (MAE), Standard Error (SE), and Mean Relative Error (MRE).</p>
	]]></content:encoded>

	<dc:title>A Fuzzy Model for Predicting the Group and Phase Velocities of Circumferential Waves Based on Subtractive Clustering</dc:title>
			<dc:creator>Youssef Nahraoui</dc:creator>
			<dc:creator>El Houcein Aassif</dc:creator>
			<dc:creator>Samir Elouaham</dc:creator>
			<dc:creator>Boujemaa Nassiri</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040056</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-16</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-16</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>56</prism:startingPage>
		<prism:doi>10.3390/signals6040056</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/56</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/55">

	<title>Signals, Vol. 6, Pages 55: Closed-Form Solution Lagrange Multipliers in Worst-Case Performance Optimization Beamforming</title>
	<link>https://www.mdpi.com/2624-6120/6/4/55</link>
	<description>This study presents a method for deriving closed-form solutions for Lagrange multipliers in worst-case performance optimization (WCPO) beamforming. By approximating the array-received signal autocorrelation matrix as a rank-1 Hermitian matrix using the low-rank approximation theory, analytical expressions for the Lagrange multipliers are derived. The method was first developed for a single plane wave scenario and then generalized to multiplane wave cases with an autocorrelation matrix rank of N. Simulations demonstrate that the proposed Lagrange multiplier formula exhibits a performance comparable to that of the second-order cone programming (SOCP) method in terms of signal-to-interference-plus-noise ratio (SINR) and direction-of-arrival (DOA) estimation accuracy, while offering a significant reduction in computational complexity. The proposed method requires three orders of magnitude less computation time than the SOCP and has a computational efficiency similar to that of the diagonal loading (DL) technique, outperforming DL in SINR and DOA estimations. Fourier amplitude spectrum analysis revealed that the beamforming filters obtained using the proposed method and the SOCP shared frequency distribution structures similar to the ideal optimal beamformer (MVDR), whereas the DL method exhibited distinct characteristics. The proposed analytical expressions for the Lagrange multipliers provide a valuable tool for implementing robust and real-time adaptive beamforming for practical applications.</description>
	<pubDate>2025-10-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 55: Closed-Form Solution Lagrange Multipliers in Worst-Case Performance Optimization Beamforming</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/55">doi: 10.3390/signals6040055</a></p>
	<p>Authors:
		Tengda Pei
		Bingnan Pei
		</p>
	<p>This study presents a method for deriving closed-form solutions for Lagrange multipliers in worst-case performance optimization (WCPO) beamforming. By approximating the array-received signal autocorrelation matrix as a rank-1 Hermitian matrix using the low-rank approximation theory, analytical expressions for the Lagrange multipliers are derived. The method was first developed for a single plane wave scenario and then generalized to multiplane wave cases with an autocorrelation matrix rank of N. Simulations demonstrate that the proposed Lagrange multiplier formula exhibits a performance comparable to that of the second-order cone programming (SOCP) method in terms of signal-to-interference-plus-noise ratio (SINR) and direction-of-arrival (DOA) estimation accuracy, while offering a significant reduction in computational complexity. The proposed method requires three orders of magnitude less computation time than the SOCP and has a computational efficiency similar to that of the diagonal loading (DL) technique, outperforming DL in SINR and DOA estimations. Fourier amplitude spectrum analysis revealed that the beamforming filters obtained using the proposed method and the SOCP shared frequency distribution structures similar to the ideal optimal beamformer (MVDR), whereas the DL method exhibited distinct characteristics. The proposed analytical expressions for the Lagrange multipliers provide a valuable tool for implementing robust and real-time adaptive beamforming for practical applications.</p>
	]]></content:encoded>

	<dc:title>Closed-Form Solution Lagrange Multipliers in Worst-Case Performance Optimization Beamforming</dc:title>
			<dc:creator>Tengda Pei</dc:creator>
			<dc:creator>Bingnan Pei</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040055</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/signals6040055</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/55</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/54">

	<title>Signals, Vol. 6, Pages 54: Compressive Sensing for Multimodal Biomedical Signal: A Systematic Mapping and Literature Review</title>
	<link>https://www.mdpi.com/2624-6120/6/4/54</link>
	<description>This study investigated the transformative potential of Compressive Sensing (CS) for optimizing multimodal biomedical signal fusion in Wireless Body Sensor Networks (WBSN), specifically targeting challenges in data storage, power consumption, and transmission bandwidth. Through a Systematic Mapping Study (SMS) and Systematic Literature Review (SLR) following the PRISMA protocol, significant advancements in adaptive CS algorithms and multimodal fusion have been achieved. However, this research also identified crucial gaps in computational efficiency, hardware scalability (particularly concerning the complex and often costly adaptive sensing hardware required for dynamic CS applications), and noise robustness for one-dimensional biomedical signals (e.g., ECG, EEG, PPG, and SCG). The findings strongly emphasize the potential of integrating CS with deep reinforcement learning and edge computing to develop energy-efficient, real-time healthcare monitoring systems, paving the way for future innovations in Internet of Medical Things (IoMT) applications.</description>
	<pubDate>2025-10-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 54: Compressive Sensing for Multimodal Biomedical Signal: A Systematic Mapping and Literature Review</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/54">doi: 10.3390/signals6040054</a></p>
	<p>Authors:
		Anggunmeka Luhur Prasasti
		Achmad Rizal
		Bayu Erfianto
		Said Ziani
		</p>
	<p>This study investigated the transformative potential of Compressive Sensing (CS) for optimizing multimodal biomedical signal fusion in Wireless Body Sensor Networks (WBSN), specifically targeting challenges in data storage, power consumption, and transmission bandwidth. Through a Systematic Mapping Study (SMS) and Systematic Literature Review (SLR) following the PRISMA protocol, significant advancements in adaptive CS algorithms and multimodal fusion have been achieved. However, this research also identified crucial gaps in computational efficiency, hardware scalability (particularly concerning the complex and often costly adaptive sensing hardware required for dynamic CS applications), and noise robustness for one-dimensional biomedical signals (e.g., ECG, EEG, PPG, and SCG). The findings strongly emphasize the potential of integrating CS with deep reinforcement learning and edge computing to develop energy-efficient, real-time healthcare monitoring systems, paving the way for future innovations in Internet of Medical Things (IoMT) applications.</p>
	]]></content:encoded>

	<dc:title>Compressive Sensing for Multimodal Biomedical Signal: A Systematic Mapping and Literature Review</dc:title>
			<dc:creator>Anggunmeka Luhur Prasasti</dc:creator>
			<dc:creator>Achmad Rizal</dc:creator>
			<dc:creator>Bayu Erfianto</dc:creator>
			<dc:creator>Said Ziani</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040054</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/signals6040054</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/53">

	<title>Signals, Vol. 6, Pages 53: Towards Next-Generation FPGA-Accelerated Vision-Based Autonomous Driving: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2624-6120/6/4/53</link>
	<description>Autonomous driving has emerged as a rapidly advancing field in both industry and academia over the past decade. Among the enabling technologies, computer vision (CV) has demonstrated high accuracy across various domains, making it a critical component of autonomous vehicle systems. However, CV tasks are computationally intensive and often require hardware accelerators to achieve real-time performance. Field Programmable Gate Arrays (FPGAs) have gained popularity in this context due to their reconfigurability and high energy efficiency. Numerous researchers have explored FPGA-accelerated CV solutions for autonomous driving, addressing key tasks such as lane detection, pedestrian recognition, traffic sign and signal classification, vehicle detection, object detection, environmental variability sensing, and fault analysis. Despite this growing body of work, the field remains fragmented, with significant variability in implementation approaches, evaluation metrics, and hardware platforms. Crucial performance factors, including latency, throughput, power consumption, energy efficiency, detection accuracy, datasets, and FPGA architectures, are often assessed inconsistently. To address this gap, this paper presents a comprehensive literature review of FPGA-accelerated, vision-based autonomous driving systems. It systematically examines existing solutions across sub-domains, categorizes key performance factors and synthesizes the current state of research. This study aims to provide a consolidated reference for researchers, supporting the development of more efficient and reliable next generation autonomous driving systems by highlighting trends, challenges, and opportunities in the field.</description>
	<pubDate>2025-10-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 53: Towards Next-Generation FPGA-Accelerated Vision-Based Autonomous Driving: A Comprehensive Review</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/53">doi: 10.3390/signals6040053</a></p>
	<p>Authors:
		Md. Reasad Zaman Chowdhury
		Ashek Seum
		Mahfuzur Rahman Talukder
		Rashed Al Amin
		Fakir Sharif Hossain
		Roman Obermaisser
		</p>
	<p>Autonomous driving has emerged as a rapidly advancing field in both industry and academia over the past decade. Among the enabling technologies, computer vision (CV) has demonstrated high accuracy across various domains, making it a critical component of autonomous vehicle systems. However, CV tasks are computationally intensive and often require hardware accelerators to achieve real-time performance. Field Programmable Gate Arrays (FPGAs) have gained popularity in this context due to their reconfigurability and high energy efficiency. Numerous researchers have explored FPGA-accelerated CV solutions for autonomous driving, addressing key tasks such as lane detection, pedestrian recognition, traffic sign and signal classification, vehicle detection, object detection, environmental variability sensing, and fault analysis. Despite this growing body of work, the field remains fragmented, with significant variability in implementation approaches, evaluation metrics, and hardware platforms. Crucial performance factors, including latency, throughput, power consumption, energy efficiency, detection accuracy, datasets, and FPGA architectures, are often assessed inconsistently. To address this gap, this paper presents a comprehensive literature review of FPGA-accelerated, vision-based autonomous driving systems. It systematically examines existing solutions across sub-domains, categorizes key performance factors and synthesizes the current state of research. This study aims to provide a consolidated reference for researchers, supporting the development of more efficient and reliable next generation autonomous driving systems by highlighting trends, challenges, and opportunities in the field.</p>
	]]></content:encoded>

	<dc:title>Towards Next-Generation FPGA-Accelerated Vision-Based Autonomous Driving: A Comprehensive Review</dc:title>
			<dc:creator>Md. Reasad Zaman Chowdhury</dc:creator>
			<dc:creator>Ashek Seum</dc:creator>
			<dc:creator>Mahfuzur Rahman Talukder</dc:creator>
			<dc:creator>Rashed Al Amin</dc:creator>
			<dc:creator>Fakir Sharif Hossain</dc:creator>
			<dc:creator>Roman Obermaisser</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040053</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/signals6040053</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/52">

	<title>Signals, Vol. 6, Pages 52: EEG-Based Analysis of Motor Imagery and Multi-Speed Passive Pedaling: Implications for Brain&amp;ndash;Computer Interfaces</title>
	<link>https://www.mdpi.com/2624-6120/6/4/52</link>
	<description>Decoding motor imagery (MI) of lower-limb movements from electroencephalography (EEG) signals remains a challenge due to the involvement of deep cortical regions, limiting the applicability of Brain&amp;ndash;Computer Interfaces (BCIs). This study proposes a novel protocol that combines passive pedaling (PP) as sensory priming with MI at different speeds (30, 45, and 60 rpm) to improve EEG-based classification. Ten healthy participants performed PP followed by MI tasks while EEG data were recorded. An increase in spectral relative power around Cz associated with both PP and MI was observed, varying with speed and suggesting that PP may enhance cortical engagement during MI. Furthermore, our classification strategy, based on Convolutional Neural Networks (CNNs), achieved an accuracy of 0.87&amp;ndash;0.89 across four classes (three speeds and rest). This performance was also compared with the standard Common Spatial Patterns (CSP) and Linear Discriminant Analysis (LDA), which achieved an accuracy of 0.67&amp;ndash;0.76. These results demonstrate the feasibility of multiclass decoding of imagined pedaling velocities and lay the groundwork for speed-adaptive BCIs, supporting future personalized and user-centered neurorehabilitation interventions.</description>
	<pubDate>2025-10-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 52: EEG-Based Analysis of Motor Imagery and Multi-Speed Passive Pedaling: Implications for Brain&amp;ndash;Computer Interfaces</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/52">doi: 10.3390/signals6040052</a></p>
	<p>Authors:
		Cristian Felipe Blanco-Diaz
		Aura Ximena Gonzalez-Cely
		Denis Delisle-Rodriguez
		Teodiano Freire Bastos-Filho
		</p>
	<p>Decoding motor imagery (MI) of lower-limb movements from electroencephalography (EEG) signals remains a challenge due to the involvement of deep cortical regions, limiting the applicability of Brain&amp;ndash;Computer Interfaces (BCIs). This study proposes a novel protocol that combines passive pedaling (PP) as sensory priming with MI at different speeds (30, 45, and 60 rpm) to improve EEG-based classification. Ten healthy participants performed PP followed by MI tasks while EEG data were recorded. An increase in spectral relative power around Cz associated with both PP and MI was observed, varying with speed and suggesting that PP may enhance cortical engagement during MI. Furthermore, our classification strategy, based on Convolutional Neural Networks (CNNs), achieved an accuracy of 0.87&amp;ndash;0.89 across four classes (three speeds and rest). This performance was also compared with the standard Common Spatial Patterns (CSP) and Linear Discriminant Analysis (LDA), which achieved an accuracy of 0.67&amp;ndash;0.76. These results demonstrate the feasibility of multiclass decoding of imagined pedaling velocities and lay the groundwork for speed-adaptive BCIs, supporting future personalized and user-centered neurorehabilitation interventions.</p>
	]]></content:encoded>

	<dc:title>EEG-Based Analysis of Motor Imagery and Multi-Speed Passive Pedaling: Implications for Brain&amp;ndash;Computer Interfaces</dc:title>
			<dc:creator>Cristian Felipe Blanco-Diaz</dc:creator>
			<dc:creator>Aura Ximena Gonzalez-Cely</dc:creator>
			<dc:creator>Denis Delisle-Rodriguez</dc:creator>
			<dc:creator>Teodiano Freire Bastos-Filho</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040052</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-10-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-10-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/signals6040052</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/51">

	<title>Signals, Vol. 6, Pages 51: Convergence of Integrated Sensing and Communication (ISAC) and Digital-Twin Technologies in Healthcare Systems: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2624-6120/6/4/51</link>
	<description>Modern healthcare systems are under growing strain from aging populations, urbanization, and rising chronic disease burdens, creating an urgent need for real-time monitoring and informed decision-making. This survey examines how the convergence of Integrated Sensing and Communication (ISAC) and digital-twin technologies can meet that need by analyzing how ISAC unifies sensing and communication to gather and transmit data with high timeliness and reliability and how digital-twin platforms use these streams to maintain continuously updated virtual replicas of patients, devices, and care environments. Our synthesis compares ISAC frequency options across sub-6 GHz, millimeter-wave, and terahertz bands with respect to resolution, penetration depth, exposure compliance, maturity, and cost, and it discusses joint waveform design and emerging 6G architectures. It also presents reference architecture patterns that connect heterogeneous clinical sensors to ISAC links, data ingestion, semantic interoperability pipelines using Fast Healthcare Interoperability Resources (FHIR) and IEEE 11073, and digital-twin synchronization, and it catalogs clinical and operational applications, together with validation and integration requirements. We conduct a targeted scoping review of peer-reviewed literature indexed in major scholarly databases between January 2015 and July 2025, with inclusion restricted to English-language, peer-reviewed studies already cited by this survey, and we apply a transparent screening and data extraction procedure to support reproducibility. The survey further reviews clinical opportunities enabled by data-synchronized twins, including personalized therapy planning, proactive early-warning systems, and virtual intervention testing, while outlining the technical, clinical, and organizational hurdles that must be addressed. 
Finally, we examine workflow adaptation; governance and ethics; provider training; and outcome measurement frameworks such as length of stay, complication rates, and patient satisfaction, and we conclude that by highlighting both the integration challenges and the operational upside, this survey offers a foundation for the development of safe, ethical, and scalable data-driven healthcare models.</description>
	<pubDate>2025-09-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 51: Convergence of Integrated Sensing and Communication (ISAC) and Digital-Twin Technologies in Healthcare Systems: A Comprehensive Review</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/51">doi: 10.3390/signals6040051</a></p>
	<p>Authors:
		Youngboo Kim
		Seungmin Oh
		Gayoung Kim
		</p>
	<p>Modern healthcare systems are under growing strain from aging populations, urbanization, and rising chronic disease burdens, creating an urgent need for real-time monitoring and informed decision-making. This survey examines how the convergence of Integrated Sensing and Communication (ISAC) and digital-twin technologies can meet that need by analyzing how ISAC unifies sensing and communication to gather and transmit data with high timeliness and reliability and how digital-twin platforms use these streams to maintain continuously updated virtual replicas of patients, devices, and care environments. Our synthesis compares ISAC frequency options across sub-6 GHz, millimeter-wave, and terahertz bands with respect to resolution, penetration depth, exposure compliance, maturity, and cost, and it discusses joint waveform design and emerging 6G architectures. It also presents reference architecture patterns that connect heterogeneous clinical sensors to ISAC links, data ingestion, semantic interoperability pipelines using Fast Healthcare Interoperability Resources (FHIR) and IEEE 11073, and digital-twin synchronization, and it catalogs clinical and operational applications, together with validation and integration requirements. We conduct a targeted scoping review of peer-reviewed literature indexed in major scholarly databases between January 2015 and July 2025, with inclusion restricted to English-language, peer-reviewed studies already cited by this survey, and we apply a transparent screening and data extraction procedure to support reproducibility. The survey further reviews clinical opportunities enabled by data-synchronized twins, including personalized therapy planning, proactive early-warning systems, and virtual intervention testing, while outlining the technical, clinical, and organizational hurdles that must be addressed. 
Finally, we examine workflow adaptation; governance and ethics; provider training; and outcome measurement frameworks such as length of stay, complication rates, and patient satisfaction, and we conclude that by highlighting both the integration challenges and the operational upside, this survey offers a foundation for the development of safe, ethical, and scalable data-driven healthcare models.</p>
	]]></content:encoded>

	<dc:title>Convergence of Integrated Sensing and Communication (ISAC) and Digital-Twin Technologies in Healthcare Systems: A Comprehensive Review</dc:title>
			<dc:creator>Youngboo Kim</dc:creator>
			<dc:creator>Seungmin Oh</dc:creator>
			<dc:creator>Gayoung Kim</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040051</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-29</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/signals6040051</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/4/50">

	<title>Signals, Vol. 6, Pages 50: Electromyography-Based Sign Language Recognition: A Low-Channel Approach for Classifying Fruit Name Gestures</title>
	<link>https://www.mdpi.com/2624-6120/6/4/50</link>
	<description>This paper presents a method for recognizing sign language gestures corresponding to fruit names using electromyography (EMG) signals. The proposed system focuses on classification using a limited number of EMG channels, aiming to reduce classification process complexity while maintaining high recognition accuracy. The dataset (DS) contains EMG signal data of 46 hearing-impaired people and descriptions of fruit names, including apple, pear, apricot, nut, cherry, and raspberry, in sign language (SL). Based on the presented DS, gesture movements were classified using five different classification algorithms&amp;mdash;Random Forest, k-Nearest Neighbors, Logistic Regression, Support Vector Machine, and neural networks&amp;mdash;and the algorithm that gives the best result for gesture movements was determined. The best classification result was obtained during recognition of the word cherry based on the RF algorithm, and 97% accuracy was achieved.</description>
	<pubDate>2025-09-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 50: Electromyography-Based Sign Language Recognition: A Low-Channel Approach for Classifying Fruit Name Gestures</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/4/50">doi: 10.3390/signals6040050</a></p>
	<p>Authors:
		Kudratjon Zohirov
		Mirjakhon Temirov
		Sardor Boykobilov
		Golib Berdiev
		Feruz Ruziboev
		Khojiakbar Egamberdiev
		Mamadiyor Sattorov
		Gulmira Pardayeva
		Kuvonch Madatov
		</p>
	<p>This paper presents a method for recognizing sign language gestures corresponding to fruit names using electromyography (EMG) signals. The proposed system focuses on classification using a limited number of EMG channels, aiming to reduce classification process complexity while maintaining high recognition accuracy. The dataset (DS) contains EMG signal data of 46 hearing-impaired people and descriptions of fruit names, including apple, pear, apricot, nut, cherry, and raspberry, in sign language (SL). Based on the presented DS, gesture movements were classified using five different classification algorithms&mdash;Random Forest, k-Nearest Neighbors, Logistic Regression, Support Vector Machine, and neural networks&mdash;and the algorithm that gives the best result for gesture movements was determined. The best classification result was obtained during recognition of the word cherry based on the RF algorithm, and 97% accuracy was achieved.</p>
	]]></content:encoded>

	<dc:title>Electromyography-Based Sign Language Recognition: A Low-Channel Approach for Classifying Fruit Name Gestures</dc:title>
			<dc:creator>Kudratjon Zohirov</dc:creator>
			<dc:creator>Mirjakhon Temirov</dc:creator>
			<dc:creator>Sardor Boykobilov</dc:creator>
			<dc:creator>Golib Berdiev</dc:creator>
			<dc:creator>Feruz Ruziboev</dc:creator>
			<dc:creator>Khojiakbar Egamberdiev</dc:creator>
			<dc:creator>Mamadiyor Sattorov</dc:creator>
			<dc:creator>Gulmira Pardayeva</dc:creator>
			<dc:creator>Kuvonch Madatov</dc:creator>
		<dc:identifier>doi: 10.3390/signals6040050</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-25</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-25</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/signals6040050</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/4/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/49">

	<title>Signals, Vol. 6, Pages 49: Intelligent Face Recognition: Comprehensive Feature Extraction Methods for Holistic Face Analysis and Modalities</title>
	<link>https://www.mdpi.com/2624-6120/6/3/49</link>
	<description>Face recognition technology utilizes unique facial features to analyze and compare individuals for identification and verification purposes. This technology is crucial for several reasons, such as improving security and authentication, effectively verifying identities, providing personalized user experiences, and automating various operations, including attendance monitoring, access management, and law enforcement activities. In this paper, comprehensive evaluations are conducted using different face detection and modality segmentation methods, feature extraction methods, and classifiers to improve system performance. As for face detection, four methods are proposed: OpenCV&amp;rsquo;s Haar Cascade classifier, Dlib&amp;rsquo;s HOG + SVM frontal face detector, Dlib&amp;rsquo;s CNN face detector, and Mediapipe&amp;rsquo;s face detector. Additionally, two types of feature extraction techniques are proposed: hand-crafted features (traditional methods: global local features) and deep learning features. Three global features were extracted, Scale-Invariant Feature Transform (SIFT), Speeded Robust Features (SURF), and Global Image Structure (GIST). Likewise, the following local feature methods are utilized: Local Binary Pattern (LBP), Weber local descriptor (WLD), and Histogram of Oriented Gradients (HOG). On the other hand, the deep learning-based features fall into two categories: convolutional neural networks (CNNs), including VGG16, VGG19, and VGG-Face, and Siamese neural networks (SNNs), which generate face embeddings. For classification, three methods are employed: Support Vector Machine (SVM), a one-class SVM variant, and Multilayer Perceptron (MLP). The system is evaluated on three datasets: in-house, Labelled Faces in the Wild (LFW), and the Pins dataset (sourced from Pinterest) providing comprehensive benchmark comparisons for facial recognition research. 
The best performance accuracy for the proposed ten-feature extraction methods applied to the in-house database in the context of the facial recognition task achieved 99.8% accuracy by using the VGG16 model combined with the SVM classifier.</description>
	<pubDate>2025-09-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 49: Intelligent Face Recognition: Comprehensive Feature Extraction Methods for Holistic Face Analysis and Modalities</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/49">doi: 10.3390/signals6030049</a></p>
	<p>Authors:
		Thoalfeqar G. Jarullah
		Ahmad Saeed Mohammad
		Musab T. S. Al-Kaltakchi
		Jabir Alshehabi Al-Ani
		</p>
	<p>Face recognition technology utilizes unique facial features to analyze and compare individuals for identification and verification purposes. This technology is crucial for several reasons, such as improving security and authentication, effectively verifying identities, providing personalized user experiences, and automating various operations, including attendance monitoring, access management, and law enforcement activities. In this paper, comprehensive evaluations are conducted using different face detection and modality segmentation methods, feature extraction methods, and classifiers to improve system performance. As for face detection, four methods are proposed: OpenCV&rsquo;s Haar Cascade classifier, Dlib&rsquo;s HOG + SVM frontal face detector, Dlib&rsquo;s CNN face detector, and Mediapipe&rsquo;s face detector. Additionally, two types of feature extraction techniques are proposed: hand-crafted features (traditional methods: global local features) and deep learning features. Three global features were extracted, Scale-Invariant Feature Transform (SIFT), Speeded Robust Features (SURF), and Global Image Structure (GIST). Likewise, the following local feature methods are utilized: Local Binary Pattern (LBP), Weber local descriptor (WLD), and Histogram of Oriented Gradients (HOG). On the other hand, the deep learning-based features fall into two categories: convolutional neural networks (CNNs), including VGG16, VGG19, and VGG-Face, and Siamese neural networks (SNNs), which generate face embeddings. For classification, three methods are employed: Support Vector Machine (SVM), a one-class SVM variant, and Multilayer Perceptron (MLP). The system is evaluated on three datasets: in-house, Labelled Faces in the Wild (LFW), and the Pins dataset (sourced from Pinterest) providing comprehensive benchmark comparisons for facial recognition research. 
The best performance accuracy for the proposed ten-feature extraction methods applied to the in-house database in the context of the facial recognition task achieved 99.8% accuracy by using the VGG16 model combined with the SVM classifier.</p>
	]]></content:encoded>

	<dc:title>Intelligent Face Recognition: Comprehensive Feature Extraction Methods for Holistic Face Analysis and Modalities</dc:title>
			<dc:creator>Thoalfeqar G. Jarullah</dc:creator>
			<dc:creator>Ahmad Saeed Mohammad</dc:creator>
			<dc:creator>Musab T. S. Al-Kaltakchi</dc:creator>
			<dc:creator>Jabir Alshehabi Al-Ani</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030049</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-19</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-19</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/signals6030049</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/48">

	<title>Signals, Vol. 6, Pages 48: Wearable-Sensor and Virtual Reality-Based Interventions for Gait and Balance Rehabilitation in Stroke Survivors: A Systematic Review</title>
	<link>https://www.mdpi.com/2624-6120/6/3/48</link>
	<description>Stroke remains one of the leading causes of disability worldwide, often resulting in persistent impairments in gait and balance. Traditional rehabilitation methods&amp;mdash;though beneficial&amp;mdash;are limited by factors such as therapist dependency, low patient adherence, and restricted access. In recent years, sensor-supported technologies, including virtual reality (VR), robotic-assisted gait training (RAGT), and wearable feedback systems, have emerged as promising adjuncts to conventional therapy. This systematic review evaluates the effectiveness of wearable and immersive technologies for gait and balance rehabilitation in adult stroke survivors. Following PRISMA guidelines, a systematic search of the PubMed and ScienceDirect databases retrieved 697 articles. After screening, eight studies published between 2015 and 2025 were included, encompassing 186 participants. The interventions included VR-based gait training, electromechanical devices (e.g., HAL, RAGT), auditory rhythmic cueing, and smart insoles, compared against conventional rehabilitation or baseline function. Most studies reported significant improvements in motor function, dynamic balance, or gait speed, particularly when interventions were intensive, task-specific, and personalized. Patient engagement, adherence, and feasibility were generally high. However, heterogeneity in study design, small sample sizes, and limited long-term data reduced the strength of the evidence. Technologies were typically implemented as complementary tools rather than standalone treatments. In conclusion, wearable and immersive systems represent promising adjuncts to conventional stroke rehabilitation, with potential to enhance motor outcomes and patient engagement. However, the heterogeneity in protocols, small sample sizes, and methodological limitations underscore the need for more robust, large-scale trials to validate their clinical effectiveness and guide implementation.</description>
	<pubDate>2025-09-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 48: Wearable-Sensor and Virtual Reality-Based Interventions for Gait and Balance Rehabilitation in Stroke Survivors: A Systematic Review</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/48">doi: 10.3390/signals6030048</a></p>
	<p>Authors:
		Alejandro Caña-Pino
		Paula Holgado-López
		</p>
	<p>Stroke remains one of the leading causes of disability worldwide, often resulting in persistent impairments in gait and balance. Traditional rehabilitation methods&mdash;though beneficial&mdash;are limited by factors such as therapist dependency, low patient adherence, and restricted access. In recent years, sensor-supported technologies, including virtual reality (VR), robotic-assisted gait training (RAGT), and wearable feedback systems, have emerged as promising adjuncts to conventional therapy. This systematic review evaluates the effectiveness of wearable and immersive technologies for gait and balance rehabilitation in adult stroke survivors. Following PRISMA guidelines, a systematic search of the PubMed and ScienceDirect databases retrieved 697 articles. After screening, eight studies published between 2015 and 2025 were included, encompassing 186 participants. The interventions included VR-based gait training, electromechanical devices (e.g., HAL, RAGT), auditory rhythmic cueing, and smart insoles, compared against conventional rehabilitation or baseline function. Most studies reported significant improvements in motor function, dynamic balance, or gait speed, particularly when interventions were intensive, task-specific, and personalized. Patient engagement, adherence, and feasibility were generally high. However, heterogeneity in study design, small sample sizes, and limited long-term data reduced the strength of the evidence. Technologies were typically implemented as complementary tools rather than standalone treatments. In conclusion, wearable and immersive systems represent promising adjuncts to conventional stroke rehabilitation, with potential to enhance motor outcomes and patient engagement. However, the heterogeneity in protocols, small sample sizes, and methodological limitations underscore the need for more robust, large-scale trials to validate their clinical effectiveness and guide implementation.</p>
	]]></content:encoded>

	<dc:title>Wearable-Sensor and Virtual Reality-Based Interventions for Gait and Balance Rehabilitation in Stroke Survivors: A Systematic Review</dc:title>
			<dc:creator>Alejandro Caña-Pino</dc:creator>
			<dc:creator>Paula Holgado-López</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030048</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-11</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-11</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/signals6030048</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/47">

	<title>Signals, Vol. 6, Pages 47: Object Part-Aware Attention-Based Matching for Robust Visual Tracking</title>
	<link>https://www.mdpi.com/2624-6120/6/3/47</link>
	<description>In this paper, we propose a novel visual tracking method with an object part-aware attention-based matching (OPAM) mechanism, which leverages local&amp;ndash;global attention to enhance visual tracking performance. Our method introduces three key components: (1) a local part-aware global self-attention mechanism that embeds rich contextual information among candidate regions, enabling the model to capture mutual dependencies and relationships effectively, (2) a local part-aware global cross-attention mechanism that injects target-specific information into candidate region features, improving the alignment and discrimination between the target and background, and (3) a global cross-attention mechanism that extracts object holistic information from the target-search feature context for further discriminability. By integrating these attention modules, our approach achieves robust feature aggregation and precise target localization. Extensive experiments on a large-scale tracking benchmark demonstrate that our method shows competitive performance metrics in both accuracy and robustness, particularly under challenging scenarios such as occlusion and appearance changes, while running at real-time speeds.</description>
	<pubDate>2025-09-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 47: Object Part-Aware Attention-Based Matching for Robust Visual Tracking</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/47">doi: 10.3390/signals6030047</a></p>
	<p>Authors:
		Janghoon Choi
		</p>
	<p>In this paper, we propose a novel visual tracking method with an object part-aware attention-based matching (OPAM) mechanism, which leverages local&ndash;global attention to enhance visual tracking performance. Our method introduces three key components: (1) a local part-aware global self-attention mechanism that embeds rich contextual information among candidate regions, enabling the model to capture mutual dependencies and relationships effectively, (2) a local part-aware global cross-attention mechanism that injects target-specific information into candidate region features, improving the alignment and discrimination between the target and background, and (3) a global cross-attention mechanism that extracts object holistic information from the target-search feature context for further discriminability. By integrating these attention modules, our approach achieves robust feature aggregation and precise target localization. Extensive experiments on a large-scale tracking benchmark demonstrate that our method shows competitive performance metrics in both accuracy and robustness, particularly under challenging scenarios such as occlusion and appearance changes, while running at real-time speeds.</p>
	]]></content:encoded>

	<dc:title>Object Part-Aware Attention-Based Matching for Robust Visual Tracking</dc:title>
			<dc:creator>Janghoon Choi</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030047</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-10</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-10</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/signals6030047</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/46">

	<title>Signals, Vol. 6, Pages 46: Deep Learning for Wildlife Monitoring: Near-Infrared Bat Detection Using YOLO Frameworks</title>
	<link>https://www.mdpi.com/2624-6120/6/3/46</link>
	<description>Bats are ecologically vital mammals, serving as pollinators, seed dispersers, and bioindicators of ecosystem health. Many species inhabit natural caves, which offer optimal conditions for survival but present challenges for direct ecological monitoring due to their dark, complex, and inaccessible environments. Traditional monitoring methods, such as mist-netting, are invasive and limited in scope, highlighting the need for non-intrusive alternatives. In this work, we present a portable multisensor platform designed to operate in underground habitats. The system captures multimodal data, including near-infrared (NIR) imagery, ultrasonic audio, 3D structural data, and RGB video. Focusing on NIR imagery, we evaluate the effectiveness of the YOLO object detection framework for automated bat detection and counting. Experiments were conducted using a dataset of NIR images collected in natural shelters. Three YOLO variants (v10, v11, and v12) were trained and tested on this dataset. The models achieved high detection accuracy, with YOLO v12m reaching a mean average precision (mAP) of 0.981. These results demonstrate that combining NIR imaging with deep learning enables accurate and non-invasive monitoring of bats in challenging environments. The proposed approach offers a scalable tool for ecological research and conservation, supporting population assessment and behavioral studies without disturbing bat colonies.</description>
	<pubDate>2025-09-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 46: Deep Learning for Wildlife Monitoring: Near-Infrared Bat Detection Using YOLO Frameworks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/46">doi: 10.3390/signals6030046</a></p>
	<p>Authors:
		José-Joel González-Barbosa
		Israel Cruz Rangel
		Alfonso Ramírez-Pedraza
		Raymundo Ramírez-Pedraza
		Isabel Bárcenas-Reyes
		Erick-Alejandro González-Barbosa
		Miguel Razo-Razo
		</p>
	<p>Bats are ecologically vital mammals, serving as pollinators, seed dispersers, and bioindicators of ecosystem health. Many species inhabit natural caves, which offer optimal conditions for survival but present challenges for direct ecological monitoring due to their dark, complex, and inaccessible environments. Traditional monitoring methods, such as mist-netting, are invasive and limited in scope, highlighting the need for non-intrusive alternatives. In this work, we present a portable multisensor platform designed to operate in underground habitats. The system captures multimodal data, including near-infrared (NIR) imagery, ultrasonic audio, 3D structural data, and RGB video. Focusing on NIR imagery, we evaluate the effectiveness of the YOLO object detection framework for automated bat detection and counting. Experiments were conducted using a dataset of NIR images collected in natural shelters. Three YOLO variants (v10, v11, and v12) were trained and tested on this dataset. The models achieved high detection accuracy, with YOLO v12m reaching a mean average precision (mAP) of 0.981. These results demonstrate that combining NIR imaging with deep learning enables accurate and non-invasive monitoring of bats in challenging environments. The proposed approach offers a scalable tool for ecological research and conservation, supporting population assessment and behavioral studies without disturbing bat colonies.</p>
	]]></content:encoded>

	<dc:title>Deep Learning for Wildlife Monitoring: Near-Infrared Bat Detection Using YOLO Frameworks</dc:title>
			<dc:creator>José-Joel González-Barbosa</dc:creator>
			<dc:creator>Israel Cruz Rangel</dc:creator>
			<dc:creator>Alfonso Ramírez-Pedraza</dc:creator>
			<dc:creator>Raymundo Ramírez-Pedraza</dc:creator>
			<dc:creator>Isabel Bárcenas-Reyes</dc:creator>
			<dc:creator>Erick-Alejandro González-Barbosa</dc:creator>
			<dc:creator>Miguel Razo-Razo</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030046</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-04</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/signals6030046</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/45">

	<title>Signals, Vol. 6, Pages 45: EMG-Based Recognition of Lower Limb Movements in Athletes: A Comparative Study of Classification Techniques</title>
	<link>https://www.mdpi.com/2624-6120/6/3/45</link>
	<description>In this article, the classification of signals arising from the movements of the lower limb of the leg (LLL) based on electromyography (EMG) (walking, sitting, up and down the stairs) was carried out. In the data collection process, 25 athletes aged 15&amp;ndash;22 were involved, and two types of data sets (DS-dataset) were formed using FreeEMG and Biosignalsplux devices. Six important time and frequency domain features were extracted from the EMG signals&amp;mdash;RMS (Root Mean Square), MAV (Mean Absolute Value), WL (Waveform Length), ZC (Zero Crossing), MDF (Median Frequency), and SSCs (Slope Sign Changes). Several classification algorithms were used to detect and classify movements, including RF (Random Forest), NN (Neural Network), SVM (Support Vector Machine), k-NN (k-Nearest Neighbors), and LR (Logistic Regression) models. Analysis of the experimental results showed that the RF algorithm achieved the highest accuracy of 98.7% when classified with DS collected via the Biosignalsplux device, demonstrating an advantage in terms of performance in motion recognition. The results obtained from the open systems used in signal processing enable real-time monitoring of athletes&amp;rsquo; physical condition, which plays a crucial role in accurately and rapidly determining the degree of muscle fatigue and the level of physical stress experienced during training sessions, thereby allowing for more effective control of performance and timely prevention of injuries.</description>
	<pubDate>2025-09-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 45: EMG-Based Recognition of Lower Limb Movements in Athletes: A Comparative Study of Classification Techniques</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/45">doi: 10.3390/signals6030045</a></p>
	<p>Authors:
		Kudratjon Zohirov
		Sarvar Makhmudjanov
		Feruz Ruziboev
		Golib Berdiev
		Mirjakhon Temirov
		Gulrukh Sherboboyeva
		Firuza Achilova
		Gulmira Pardayeva
		Sardor Boykobilov
		</p>
	<p>In this article, the classification of signals arising from the movements of the lower limb of the leg (LLL) based on electromyography (EMG) (walking, sitting, up and down the stairs) was carried out. In the data collection process, 25 athletes aged 15&ndash;22 were involved, and two types of data sets (DS-dataset) were formed using FreeEMG and Biosignalsplux devices. Six important time and frequency domain features were extracted from the EMG signals&mdash;RMS (Root Mean Square), MAV (Mean Absolute Value), WL (Waveform Length), ZC (Zero Crossing), MDF (Median Frequency), and SSCs (Slope Sign Changes). Several classification algorithms were used to detect and classify movements, including RF (Random Forest), NN (Neural Network), SVM (Support Vector Machine), k-NN (k-Nearest Neighbors), and LR (Logistic Regression) models. Analysis of the experimental results showed that the RF algorithm achieved the highest accuracy of 98.7% when classified with DS collected via the Biosignalsplux device, demonstrating an advantage in terms of performance in motion recognition. The results obtained from the open systems used in signal processing enable real-time monitoring of athletes&rsquo; physical condition, which plays a crucial role in accurately and rapidly determining the degree of muscle fatigue and the level of physical stress experienced during training sessions, thereby allowing for more effective control of performance and timely prevention of injuries.</p>
	]]></content:encoded>

	<dc:title>EMG-Based Recognition of Lower Limb Movements in Athletes: A Comparative Study of Classification Techniques</dc:title>
			<dc:creator>Kudratjon Zohirov</dc:creator>
			<dc:creator>Sarvar Makhmudjanov</dc:creator>
			<dc:creator>Feruz Ruziboev</dc:creator>
			<dc:creator>Golib Berdiev</dc:creator>
			<dc:creator>Mirjakhon Temirov</dc:creator>
			<dc:creator>Gulrukh Sherboboyeva</dc:creator>
			<dc:creator>Firuza Achilova</dc:creator>
			<dc:creator>Gulmira Pardayeva</dc:creator>
			<dc:creator>Sardor Boykobilov</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030045</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/signals6030045</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/44">

	<title>Signals, Vol. 6, Pages 44: Resilient Consensus-Based Target Tracking Under False Data Injection Attacks in Multi-Agent Networks</title>
	<link>https://www.mdpi.com/2624-6120/6/3/44</link>
	<description>Distributed target tracking in multi-agent networks plays a critical role in cooperative sensing and autonomous navigation. However, it faces significant challenges in highly dynamic and adversarial setups. This study aims to enhance the resilience of decentralized target tracking algorithms against measurement faults and cyber&amp;ndash;physical threats, especially false data injection attacks. We propose a consensus-based estimation algorithm that integrates a nearly constant velocity model with saturation-based filtering to suppress impulsive measurement variations and promote robust, distributed state estimation. To counteract adversarial conditions, we incorporate a dynamic false data injection detection and isolation mechanism that uses innovation thresholds to identify and disregard suspicious measurements before they can degrade the global estimate. The effectiveness of the proposed algorithms is demonstrated through a series of simulation-based case studies under both benign and adversarial conditions. The results show that increased network connectivity and higher consensus iteration rates improve estimation accuracy and convergence speed, while properly tuned saturation filters achieve a practical balance between fault suppression and accurate estimation. Furthermore, under localized, coordinated, and transient false data injection attacks, the detection mechanism successfully identifies compromised agents and prevents their data from corrupting the distributed global estimate. Overall, this study illustrates that the proposed algorithm provides a simplified fault-tolerant solution that significantly enhances the accuracy and resilience of distributed target tracking without imposing excessive communication or computational burdens.</description>
	<pubDate>2025-09-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 44: Resilient Consensus-Based Target Tracking Under False Data Injection Attacks in Multi-Agent Networks</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/44">doi: 10.3390/signals6030044</a></p>
	<p>Authors:
		Amir Ahmad Ghods
		Mohammadreza Doostmohammadian
		</p>
	<p>Distributed target tracking in multi-agent networks plays a critical role in cooperative sensing and autonomous navigation. However, it faces significant challenges in highly dynamic and adversarial setups. This study aims to enhance the resilience of decentralized target tracking algorithms against measurement faults and cyber&ndash;physical threats, especially false data injection attacks. We propose a consensus-based estimation algorithm that integrates a nearly constant velocity model with saturation-based filtering to suppress impulsive measurement variations and promote robust, distributed state estimation. To counteract adversarial conditions, we incorporate a dynamic false data injection detection and isolation mechanism that uses innovation thresholds to identify and disregard suspicious measurements before they can degrade the global estimate. The effectiveness of the proposed algorithms is demonstrated through a series of simulation-based case studies under both benign and adversarial conditions. The results show that increased network connectivity and higher consensus iteration rates improve estimation accuracy and convergence speed, while properly tuned saturation filters achieve a practical balance between fault suppression and accurate estimation. Furthermore, under localized, coordinated, and transient false data injection attacks, the detection mechanism successfully identifies compromised agents and prevents their data from corrupting the distributed global estimate. Overall, this study illustrates that the proposed algorithm provides a simplified fault-tolerant solution that significantly enhances the accuracy and resilience of distributed target tracking without imposing excessive communication or computational burdens.</p>
	]]></content:encoded>

	<dc:title>Resilient Consensus-Based Target Tracking Under False Data Injection Attacks in Multi-Agent Networks</dc:title>
			<dc:creator>Amir Ahmad Ghods</dc:creator>
			<dc:creator>Mohammadreza Doostmohammadian</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030044</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-09-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-09-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/signals6030044</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/43">

	<title>Signals, Vol. 6, Pages 43: Personal Identification Using 3D Topographic Cubes Extracted from EEG Signals by Means of Automated Feature Representation</title>
	<link>https://www.mdpi.com/2624-6120/6/3/43</link>
	<description>Electroencephalogram (EEG)-based identification offers a promising biometric solution by leveraging the uniqueness of individual brain activity patterns. This study proposes a framework based on a convolutional autoencoder (CAE) along with a traditional classifier for identifying individuals using EEG brainprints. The convolutional autoencoder extracts a compact and discriminative representation from the topographic data cubes that capture both spatial and temporal dynamics of neural oscillations. The latent tensor features extracted by the CAE are subsequently classified by a machine learning module utilizing Support Vector Machine (SVM), Random Forest (RF), k-Nearest Neighbor (KNN), and Artificial Neural Network (ANN) models. EEG data were collected under three conditions&amp;mdash;resting state, music stimuli, and cognitive task&amp;mdash;to investigate a diverse range of neural responses. Training and testing datasets were extracted from separate sessions to enable a true longitudinal analysis. The performance of the framework was evaluated using the Area Under the Curve (AUC) and accuracy (ACC) metrics. The effect of subject identifiability was also investigated. The proposed framework achieved a performance score up to a maximum AUC of 99.89% and ACC of 96.98%. These results demonstrate the effectiveness of the proposed automated subject-specific patterns in capturing stable EEG brainprints and support the potential of the proposed framework for reliable, session-independent EEG-based biometric identification.</description>
	<pubDate>2025-08-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 43: Personal Identification Using 3D Topographic Cubes Extracted from EEG Signals by Means of Automated Feature Representation</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/43">doi: 10.3390/signals6030043</a></p>
	<p>Authors:
		Muhammed Esad Oztemel
		Ömer Muhammet Soysal
		</p>
	<p>Electroencephalogram (EEG)-based identification offers a promising biometric solution by leveraging the uniqueness of individual brain activity patterns. This study proposes a framework based on a convolutional autoencoder (CAE) along with a traditional classifier for identifying individuals using EEG brainprints. The convolutional autoencoder extracts a compact and discriminative representation from the topographic data cubes that capture both spatial and temporal dynamics of neural oscillations. The latent tensor features extracted by the CAE are subsequently classified by a machine learning module utilizing Support Vector Machine (SVM), Random Forest (RF), k-Nearest Neighbor (KNN), and Artificial Neural Network (ANN) models. EEG data were collected under three conditions&mdash;resting state, music stimuli, and cognitive task&mdash;to investigate a diverse range of neural responses. Training and testing datasets were extracted from separate sessions to enable a true longitudinal analysis. The performance of the framework was evaluated using the Area Under the Curve (AUC) and accuracy (ACC) metrics. The effect of subject identifiability was also investigated. The proposed framework achieved a performance score up to a maximum AUC of 99.89% and ACC of 96.98%. These results demonstrate the effectiveness of the proposed automated subject-specific patterns in capturing stable EEG brainprints and support the potential of the proposed framework for reliable, session-independent EEG-based biometric identification.</p>
	]]></content:encoded>

	<dc:title>Personal Identification Using 3D Topographic Cubes Extracted from EEG Signals by Means of Automated Feature Representation</dc:title>
			<dc:creator>Muhammed Esad Oztemel</dc:creator>
			<dc:creator>Ömer Muhammet Soysal</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030043</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-21</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/signals6030043</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/42">

	<title>Signals, Vol. 6, Pages 42: Decision Tree and ANOVA as Feature Selection from Vibration Signals to Improve the Diagnosis of Belt Conveyor Idlers</title>
	<link>https://www.mdpi.com/2624-6120/6/3/42</link>
	<description>This study aims to compare decision tree and Analysis of Variance (ANOVA) techniques as feature selection methods, combined with Wavelet Packet Decomposition (WPD) for feature extraction, to enhance the diagnosis of faults in belt conveyor idlers. Belt conveyors are widely used in mining for efficient transport, but idlers composed of rollers are frequently subject to failure, making continuous monitoring essential to ensure reliability. Automated diagnostic solutions using vibration signals and machine learning rely on signal processing for feature extraction, often requiring dimensionality reduction or feature selection to improve classification accuracy. Due to the limitations of traditional techniques such as Principal Component Analysis (PCA) in handling temporal variations, Decision Tree and ANOVA emerge as effective alternatives for feature selection. This framework applied to each feature selection method, and Support Vector Machine (SVM) was used as a classification technique. The diagnostic performance of each method, including the case without feature selection, was evaluated. The results showed a higher diagnostic accuracy performance for the approaches that applied the features from the decision tree and from ANOVA. The improvement in the diagnosis of roller failures with feature selection was corroborated with the hit rates of failure mode, severity level, and location of a defective roller above 93.5%.</description>
	<pubDate>2025-08-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 42: Decision Tree and ANOVA as Feature Selection from Vibration Signals to Improve the Diagnosis of Belt Conveyor Idlers</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/42">doi: 10.3390/signals6030042</a></p>
	<p>Authors:
		João L. L. Soares
		Thiago B. Costa
		Geovane S. do Nascimento
		Walter S. Sousa
		Jullyane M. S. de Figueiredo
		Danilo S. Braga
		André L. A. Mesquita
		Alexandre L. A. Mesquita
		</p>
	<p>This study aims to compare decision tree and Analysis of Variance (ANOVA) techniques as feature selection methods, combined with Wavelet Packet Decomposition (WPD) for feature extraction, to enhance the diagnosis of faults in belt conveyor idlers. Belt conveyors are widely used in mining for efficient transport, but idlers composed of rollers are frequently subject to failure, making continuous monitoring essential to ensure reliability. Automated diagnostic solutions using vibration signals and machine learning rely on signal processing for feature extraction, often requiring dimensionality reduction or feature selection to improve classification accuracy. Due to the limitations of traditional techniques such as Principal Component Analysis (PCA) in handling temporal variations, Decision Tree and ANOVA emerge as effective alternatives for feature selection. This framework applied to each feature selection method, and Support Vector Machine (SVM) was used as a classification technique. The diagnostic performance of each method, including the case without feature selection, was evaluated. The results showed a higher diagnostic accuracy performance for the approaches that applied the features from the decision tree and from ANOVA. The improvement in the diagnosis of roller failures with feature selection was corroborated with the hit rates of failure mode, severity level, and location of a defective roller above 93.5%.</p>
	]]></content:encoded>

	<dc:title>Decision Tree and ANOVA as Feature Selection from Vibration Signals to Improve the Diagnosis of Belt Conveyor Idlers</dc:title>
			<dc:creator>João L. L. Soares</dc:creator>
			<dc:creator>Thiago B. Costa</dc:creator>
			<dc:creator>Geovane S. do Nascimento</dc:creator>
			<dc:creator>Walter S. Sousa</dc:creator>
			<dc:creator>Jullyane M. S. de Figueiredo</dc:creator>
			<dc:creator>Danilo S. Braga</dc:creator>
			<dc:creator>André L. A. Mesquita</dc:creator>
			<dc:creator>Alexandre L. A. Mesquita</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030042</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-13</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-13</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/signals6030042</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/41">

	<title>Signals, Vol. 6, Pages 41: Rocket Launch Detection with Smartphone Audio and Transfer Learning</title>
	<link>https://www.mdpi.com/2624-6120/6/3/41</link>
	<description>Rocket launches generate infrasound signatures that have been detected at great distances. Due to the sparsity of the networks that have made these detections, however, most signals are detected tens of minutes to hours after the rocket launch. In this work, a method of near-real-time detection of rocket launches using data from a network of smartphones located 10&amp;ndash;70 km from launch sites is presented. A machine learning model is trained and tested on the open-access Aggregated Smartphone Timeseries of Rocket-generated Acoustics (ASTRA), Smartphone High-explosive Audio Recordings Dataset (SHAReD), and ESC-50 datasets, resulting in a final accuracy of 97% and a false positive rate of &amp;lt;1%. The performance and behavior of the model are summarized, and its suitability for persistent monitoring applications is discussed.</description>
	<pubDate>2025-08-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 41: Rocket Launch Detection with Smartphone Audio and Transfer Learning</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/41">doi: 10.3390/signals6030041</a></p>
	<p>Authors:
		Sarah K. Popenhagen
		Samuel Kei Takazawa
		Milton A. Garcés
		</p>
	<p>Rocket launches generate infrasound signatures that have been detected at great distances. Due to the sparsity of the networks that have made these detections, however, most signals are detected tens of minutes to hours after the rocket launch. In this work, a method of near-real-time detection of rocket launches using data from a network of smartphones located 10&ndash;70 km from launch sites is presented. A machine learning model is trained and tested on the open-access Aggregated Smartphone Timeseries of Rocket-generated Acoustics (ASTRA), Smartphone High-explosive Audio Recordings Dataset (SHAReD), and ESC-50 datasets, resulting in a final accuracy of 97% and a false positive rate of &lt;1%. The performance and behavior of the model are summarized, and its suitability for persistent monitoring applications is discussed.</p>
	]]></content:encoded>

	<dc:title>Rocket Launch Detection with Smartphone Audio and Transfer Learning</dc:title>
			<dc:creator>Sarah K. Popenhagen</dc:creator>
			<dc:creator>Samuel Kei Takazawa</dc:creator>
			<dc:creator>Milton A. Garcés</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030041</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-11</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-11</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/signals6030041</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/40">

	<title>Signals, Vol. 6, Pages 40: A Deep Learning Approach to Teeth Segmentation and Orientation from Panoramic X-Rays</title>
	<link>https://www.mdpi.com/2624-6120/6/3/40</link>
	<description>Accurate teeth segmentation and orientation are fundamental in modern oral healthcare, enabling precise diagnosis, treatment planning, and dental implant design. In this study, we present a comprehensive approach to teeth segmentation and orientation from panoramic X-ray images, leveraging deep-learning techniques. We built an end-to-end instance segmentation network that uses an encoder&amp;ndash;decoder architecture reinforced with grid-aware attention gates along the skip connections. We introduce oriented bounding box (OBB) generation through principal component analysis (PCA) for precise tooth orientation estimation. Evaluating our approach on the publicly available DNS dataset, comprising 543 panoramic X-ray images, we achieve the highest Intersection-over-Union (IoU) score of 82.43% and a Dice Similarity Coefficient (DSC) score of 90.37% among compared models in teeth instance segmentation. In OBB analysis, we obtain the Rotated IoU (RIoU) score of 82.82%. We also conduct detailed analyses of individual tooth labels and categorical performance, shedding light on strengths and weaknesses. The proposed model&amp;rsquo;s accuracy and versatility offer promising prospects for improving dental diagnoses, treatment planning, and personalized healthcare in the oral domain.</description>
	<pubDate>2025-08-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 40: A Deep Learning Approach to Teeth Segmentation and Orientation from Panoramic X-Rays</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/40">doi: 10.3390/signals6030040</a></p>
	<p>Authors:
		Mou Deb
		Madhab Deb
		Mrinal Kanti Dhar
		</p>
	<p>Accurate teeth segmentation and orientation are fundamental in modern oral healthcare, enabling precise diagnosis, treatment planning, and dental implant design. In this study, we present a comprehensive approach to teeth segmentation and orientation from panoramic X-ray images, leveraging deep-learning techniques. We built an end-to-end instance segmentation network that uses an encoder&ndash;decoder architecture reinforced with grid-aware attention gates along the skip connections. We introduce oriented bounding box (OBB) generation through principal component analysis (PCA) for precise tooth orientation estimation. Evaluating our approach on the publicly available DNS dataset, comprising 543 panoramic X-ray images, we achieve the highest Intersection-over-Union (IoU) score of 82.43% and a Dice Similarity Coefficient (DSC) score of 90.37% among compared models in teeth instance segmentation. In OBB analysis, we obtain the Rotated IoU (RIoU) score of 82.82%. We also conduct detailed analyses of individual tooth labels and categorical performance, shedding light on strengths and weaknesses. The proposed model&rsquo;s accuracy and versatility offer promising prospects for improving dental diagnoses, treatment planning, and personalized healthcare in the oral domain.</p>
	]]></content:encoded>

	<dc:title>A Deep Learning Approach to Teeth Segmentation and Orientation from Panoramic X-Rays</dc:title>
			<dc:creator>Mou Deb</dc:creator>
			<dc:creator>Madhab Deb</dc:creator>
			<dc:creator>Mrinal Kanti Dhar</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030040</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-08</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/signals6030040</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/39">

	<title>Signals, Vol. 6, Pages 39: Method for Target Detection in a High Noise Environment Through Frequency Analysis Using an Event-Based Vision Sensor</title>
	<link>https://www.mdpi.com/2624-6120/6/3/39</link>
	<description>Event-based vision sensors (EVSs), often referred to as neuromorphic cameras, operate by responding to changes in brightness on a pixel-by-pixel basis. In contrast, traditional framing cameras employ some fixed sampling interval where integrated intensity is read off the entire focal plane at once. Similar to traditional cameras, EVSs can suffer loss of sensitivity through scenes with high intensity and dynamic clutter, reducing the ability to see points of interest through traditional event processing means. This paper describes a method to reduce the negative impacts of these types of EVS clutter and enable more robust target detection through the use of individual pixel frequency analysis, background suppression, and statistical filtering. Additionally, issues found in normal frequency analysis such as phase differences between sources, aliasing, and spectral leakage are less relevant in this method. The statistical filtering simply determines what pixels have significant frequency content after the background suppression instead of focusing on the actual frequencies in the scene. Initial testing on simulated data demonstrates a proof of concept for this method, which reduces artificial scene noise and enables improved target detection.</description>
	<pubDate>2025-08-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 39: Method for Target Detection in a High Noise Environment Through Frequency Analysis Using an Event-Based Vision Sensor</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/39">doi: 10.3390/signals6030039</a></p>
	<p>Authors:
		Will Johnston
		Shannon Young
		David Howe
		Rachel Oliver
		Zachry Theis
		Brian McReynolds
		Michael Dexter
		</p>
	<p>Event-based vision sensors (EVSs), often referred to as neuromorphic cameras, operate by responding to changes in brightness on a pixel-by-pixel basis. In contrast, traditional framing cameras employ some fixed sampling interval where integrated intensity is read off the entire focal plane at once. Similar to traditional cameras, EVSs can suffer loss of sensitivity through scenes with high intensity and dynamic clutter, reducing the ability to see points of interest through traditional event processing means. This paper describes a method to reduce the negative impacts of these types of EVS clutter and enable more robust target detection through the use of individual pixel frequency analysis, background suppression, and statistical filtering. Additionally, issues found in normal frequency analysis such as phase differences between sources, aliasing, and spectral leakage are less relevant in this method. The statistical filtering simply determines what pixels have significant frequency content after the background suppression instead of focusing on the actual frequencies in the scene. Initial testing on simulated data demonstrates a proof of concept for this method, which reduces artificial scene noise and enables improved target detection.</p>
	]]></content:encoded>

	<dc:title>Method for Target Detection in a High Noise Environment Through Frequency Analysis Using an Event-Based Vision Sensor</dc:title>
			<dc:creator>Will Johnston</dc:creator>
			<dc:creator>Shannon Young</dc:creator>
			<dc:creator>David Howe</dc:creator>
			<dc:creator>Rachel Oliver</dc:creator>
			<dc:creator>Zachry Theis</dc:creator>
			<dc:creator>Brian McReynolds</dc:creator>
			<dc:creator>Michael Dexter</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030039</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-05</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-05</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/signals6030039</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/38">

	<title>Signals, Vol. 6, Pages 38: Infrared Thermographic Signal Analysis of Bioactive Edible Oils Using CNNs for Quality Assessment</title>
	<link>https://www.mdpi.com/2624-6120/6/3/38</link>
	<description>Nutrition plays a fundamental role in promoting health and preventing chronic diseases, with bioactive food components offering a therapeutic potential in biomedical applications. Among these, edible oils are recognised for their functional properties, which contribute to disease prevention and metabolic regulation. The proposed study aims to evaluate the quality of four bioactive oils (olive oil, sunflower oil, tomato seed oil, and pumpkin seed oil) by analysing their thermal behaviour through infrared (IR) imaging. The study designed a customised electronic system to acquire thermographic signals under controlled temperature and humidity conditions. The acquisition system was used to extract thermal data. Analysis of the acquired thermal signals revealed characteristic heat absorption profiles used to infer differences in oil properties related to stability and degradation potential. A hybrid deep learning model that integrates Convolutional Neural Networks (CNNs) with Long Short-Term Memory (LSTM) units was used to classify and differentiate the oils based on stability, thermal reactivity, and potential health benefits. A signal analysis showed that the AI-based method improves both the accuracy (achieving an F1-score of 93.66%) and the repeatability of quality assessments, providing a non-invasive and intelligent framework for the validation and traceability of nutritional compounds.</description>
	<pubDate>2025-08-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 38: Infrared Thermographic Signal Analysis of Bioactive Edible Oils Using CNNs for Quality Assessment</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/38">doi: 10.3390/signals6030038</a></p>
	<p>Authors:
		Danilo Pratticò
		Filippo Laganà
		</p>
	<p>Nutrition plays a fundamental role in promoting health and preventing chronic diseases, with bioactive food components offering a therapeutic potential in biomedical applications. Among these, edible oils are recognised for their functional properties, which contribute to disease prevention and metabolic regulation. The proposed study aims to evaluate the quality of four bioactive oils (olive oil, sunflower oil, tomato seed oil, and pumpkin seed oil) by analysing their thermal behaviour through infrared (IR) imaging. The study designed a customised electronic system to acquire thermographic signals under controlled temperature and humidity conditions. The acquisition system was used to extract thermal data. Analysis of the acquired thermal signals revealed characteristic heat absorption profiles used to infer differences in oil properties related to stability and degradation potential. A hybrid deep learning model that integrates Convolutional Neural Networks (CNNs) with Long Short-Term Memory (LSTM) units was used to classify and differentiate the oils based on stability, thermal reactivity, and potential health benefits. A signal analysis showed that the AI-based method improves both the accuracy (achieving an F1-score of 93.66%) and the repeatability of quality assessments, providing a non-invasive and intelligent framework for the validation and traceability of nutritional compounds.</p>
	]]></content:encoded>

	<dc:title>Infrared Thermographic Signal Analysis of Bioactive Edible Oils Using CNNs for Quality Assessment</dc:title>
			<dc:creator>Danilo Pratticò</dc:creator>
			<dc:creator>Filippo Laganà</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030038</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/signals6030038</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/37">

	<title>Signals, Vol. 6, Pages 37: Distributed Diffusion Multi-Distribution Filter with IMM for Heavy-Tailed Noise</title>
	<link>https://www.mdpi.com/2624-6120/6/3/37</link>
	<description>With the diversification of space applications, the tracking of maneuvering targets has gradually gained attention. Issues such as their wide range of movement and observation outliers caused by human operation are worthy of in-depth discussion. This paper presents a novel distributed diffusion multi-noise Interacting Multiple Model (IMM) filter for maneuvering target tracking in heavy-tailed noise. The proposed approach leverages parallel Gaussian and Student-t filters to enhance robustness against non-Gaussian process and measurement noise. This hybrid filter is implemented as a node within a distributed network, where the diffusion algorithm leads to the global state asymptotically reaching consensus as the filtering time progresses. Furthermore, a fusion of multiple motion models within the IMM algorithm enables robust tracking of maneuvering targets across the distributed network and process outlier caused by maneuver compared to previous studies. Simulation results demonstrate the effectiveness of the proposed filter in tracking maneuvering targets.</description>
	<pubDate>2025-08-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 37: Distributed Diffusion Multi-Distribution Filter with IMM for Heavy-Tailed Noise</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/37">doi: 10.3390/signals6030037</a></p>
	<p>Authors:
		Guannan Chang
		Changwu Jiang
		Wenxing Fu
		Tao Cui
		Peng Dong
		</p>
	<p>With the diversification of space applications, the tracking of maneuvering targets has gradually gained attention. Issues such as their wide range of movement and observation outliers caused by human operation are worthy of in-depth discussion. This paper presents a novel distributed diffusion multi-noise Interacting Multiple Model (IMM) filter for maneuvering target tracking in heavy-tailed noise. The proposed approach leverages parallel Gaussian and Student-t filters to enhance robustness against non-Gaussian process and measurement noise. This hybrid filter is implemented as a node within a distributed network, where the diffusion algorithm leads to the global state asymptotically reaching consensus as the filtering time progresses. Furthermore, a fusion of multiple motion models within the IMM algorithm enables robust tracking of maneuvering targets across the distributed network and process outlier caused by maneuver compared to previous studies. Simulation results demonstrate the effectiveness of the proposed filter in tracking maneuvering targets.</p>
	]]></content:encoded>

	<dc:title>Distributed Diffusion Multi-Distribution Filter with IMM for Heavy-Tailed Noise</dc:title>
			<dc:creator>Guannan Chang</dc:creator>
			<dc:creator>Changwu Jiang</dc:creator>
			<dc:creator>Wenxing Fu</dc:creator>
			<dc:creator>Tao Cui</dc:creator>
			<dc:creator>Peng Dong</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030037</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-08-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-08-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/signals6030037</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/36">

	<title>Signals, Vol. 6, Pages 36: Robust Peak Detection Techniques for Harmonic FMCW Radar Systems: Algorithmic Comparison and FPGA Feasibility Under Phase Noise</title>
	<link>https://www.mdpi.com/2624-6120/6/3/36</link>
	<description>Accurate peak detection in the frequency domain is fundamental to reliable range estimation in Frequency-Modulated Continuous-Wave (FMCW) radar systems, particularly in challenging conditions characterized by a low signal-to-noise ratio (SNR) and phase noise impairments. This paper presents a comprehensive comparative analysis of five peak detection algorithms: FFT thresholding, Cell-Averaging Constant False Alarm Rate (CA-CFAR), a simplified Matrix Pencil Method (MPM), SVD-based detection, and a novel Learned Thresholded Subspace Projection (LTSP) approach. The proposed LTSP method leverages singular value decomposition (SVD) to extract the dominant signal subspace, followed by signal reconstruction and spectral peak analysis, enabling robust detection in noisy and spectrally distorted environments. Each technique was analytically modeled and extensively evaluated through Monte Carlo simulations across a wide range of SNRs and oscillator phase noise levels, from &amp;minus;100 dBc/Hz to &amp;minus;70 dBc/Hz. Additionally, real-world validation was performed using a custom-built harmonic FMCW radar prototype operating in the 2.4&amp;ndash;2.5 GHz transmission band and 4.8&amp;ndash;5.0 GHz harmonic reception band. Results show that CA-CFAR offers the highest resilience to phase noise, while the proposed LTSP method delivers competitive detection performance with improved robustness over conventional FFT and MPM techniques. Furthermore, the hardware feasibility of each algorithm is assessed for implementation on a Xilinx FPGA platform, highlighting practical trade-offs between detection performance, computational complexity, and resource utilization. These findings provide valuable guidance for the design of real-time, embedded FMCW radar systems operating under adverse conditions.</description>
	<pubDate>2025-07-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 36: Robust Peak Detection Techniques for Harmonic FMCW Radar Systems: Algorithmic Comparison and FPGA Feasibility Under Phase Noise</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/36">doi: 10.3390/signals6030036</a></p>
	<p>Authors:
		Ahmed El-Awamry
		Feng Zheng
		Thomas Kaiser
		Maher Khaliel
		</p>
	<p>Accurate peak detection in the frequency domain is fundamental to reliable range estimation in Frequency-Modulated Continuous-Wave (FMCW) radar systems, particularly in challenging conditions characterized by a low signal-to-noise ratio (SNR) and phase noise impairments. This paper presents a comprehensive comparative analysis of five peak detection algorithms: FFT thresholding, Cell-Averaging Constant False Alarm Rate (CA-CFAR), a simplified Matrix Pencil Method (MPM), SVD-based detection, and a novel Learned Thresholded Subspace Projection (LTSP) approach. The proposed LTSP method leverages singular value decomposition (SVD) to extract the dominant signal subspace, followed by signal reconstruction and spectral peak analysis, enabling robust detection in noisy and spectrally distorted environments. Each technique was analytically modeled and extensively evaluated through Monte Carlo simulations across a wide range of SNRs and oscillator phase noise levels, from &ndash;100 dBc/Hz to &ndash;70 dBc/Hz. Additionally, real-world validation was performed using a custom-built harmonic FMCW radar prototype operating in the 2.4&ndash;2.5 GHz transmission band and 4.8&ndash;5.0 GHz harmonic reception band. Results show that CA-CFAR offers the highest resilience to phase noise, while the proposed LTSP method delivers competitive detection performance with improved robustness over conventional FFT and MPM techniques. Furthermore, the hardware feasibility of each algorithm is assessed for implementation on a Xilinx FPGA platform, highlighting practical trade-offs between detection performance, computational complexity, and resource utilization. These findings provide valuable guidance for the design of real-time, embedded FMCW radar systems operating under adverse conditions.</p>
	]]></content:encoded>

	<dc:title>Robust Peak Detection Techniques for Harmonic FMCW Radar Systems: Algorithmic Comparison and FPGA Feasibility Under Phase Noise</dc:title>
			<dc:creator>Ahmed El-Awamry</dc:creator>
			<dc:creator>Feng Zheng</dc:creator>
			<dc:creator>Thomas Kaiser</dc:creator>
			<dc:creator>Maher Khaliel</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030036</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-30</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-30</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/signals6030036</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/35">

	<title>Signals, Vol. 6, Pages 35: Micro-Doppler Signal Features of Idling Vehicle Vibrations: Dependence on Gear Engagements and Occupancy</title>
	<link>https://www.mdpi.com/2624-6120/6/3/35</link>
	<description>This study investigates the use of a custom-built 10 GHz continuous wave micro-Doppler radar system to analyze external vibrations of idling vehicles under various conditions. Scenarios included different gear engagements with one occupant and parked gear with up to four occupants. Motivated by security concerns, such as the threat posed by idling vehicles with multiple occupants, the research explores how micro-Doppler signatures can indicate vehicle readiness to move. Experiments focused on a mid-size SUV, with similar trends seen in other vehicles. Radar data were compared to in situ accelerometer measurements, confirming that the radar system can detect subtle frequency changes, especially during gear shifts. The system&amp;rsquo;s sensitivity enables it to distinguish variations tied to gear state and passenger load. Extracted features like frequency and magnitude show strong potential for use in machine learning models, offering a non-invasive, remote sensing method for reliably identifying vehicle operational states and occupancy levels in security or monitoring contexts. Spectrogram and PSD analyses reveal consistent tonal vibrations around 30 Hz, tied to engine activity, with harmonics at 60 Hz and 90 Hz. Gear shifts produce impulse signatures primarily below 20 Hz, and transient data show distinct peaks at 50, 80, and 100 Hz. Key features at 23 Hz and 45 Hz effectively indicate engine and gear states. Radar and accelerometer data align well, supporting the potential for remote sensing and machine learning-based classification.</description>
	<pubDate>2025-07-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 35: Micro-Doppler Signal Features of Idling Vehicle Vibrations: Dependence on Gear Engagements and Occupancy</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/35">doi: 10.3390/signals6030035</a></p>
	<p>Authors:
		Ram M. Narayanan
		Benjamin D. Simone
		Daniel K. Watson
		Karl M. Reichard
		Kyle A. Gallagher
		</p>
	<p>This study investigates the use of a custom-built 10 GHz continuous wave micro-Doppler radar system to analyze external vibrations of idling vehicles under various conditions. Scenarios included different gear engagements with one occupant and parked gear with up to four occupants. Motivated by security concerns, such as the threat posed by idling vehicles with multiple occupants, the research explores how micro-Doppler signatures can indicate vehicle readiness to move. Experiments focused on a mid-size SUV, with similar trends seen in other vehicles. Radar data were compared to in situ accelerometer measurements, confirming that the radar system can detect subtle frequency changes, especially during gear shifts. The system&rsquo;s sensitivity enables it to distinguish variations tied to gear state and passenger load. Extracted features like frequency and magnitude show strong potential for use in machine learning models, offering a non-invasive, remote sensing method for reliably identifying vehicle operational states and occupancy levels in security or monitoring contexts. Spectrogram and PSD analyses reveal consistent tonal vibrations around 30 Hz, tied to engine activity, with harmonics at 60 Hz and 90 Hz. Gear shifts produce impulse signatures primarily below 20 Hz, and transient data show distinct peaks at 50, 80, and 100 Hz. Key features at 23 Hz and 45 Hz effectively indicate engine and gear states. Radar and accelerometer data align well, supporting the potential for remote sensing and machine learning-based classification.</p>
	]]></content:encoded>

	<dc:title>Micro-Doppler Signal Features of Idling Vehicle Vibrations: Dependence on Gear Engagements and Occupancy</dc:title>
			<dc:creator>Ram M. Narayanan</dc:creator>
			<dc:creator>Benjamin D. Simone</dc:creator>
			<dc:creator>Daniel K. Watson</dc:creator>
			<dc:creator>Karl M. Reichard</dc:creator>
			<dc:creator>Kyle A. Gallagher</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030035</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-24</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/signals6030035</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/34">

	<title>Signals, Vol. 6, Pages 34: Beyond Correlation: Mutual Information to Detect Damage in Nonlinear Systems</title>
	<link>https://www.mdpi.com/2624-6120/6/3/34</link>
	<description>Analyzing and measuring the similarity between two signals is a common task in many vibration-based structural health monitoring applications. Coherence between input and response signals serves as a convenient indicator of damage, based on the premise that nonlinearity due to damage in a linear system manifests as a loss of coherence in specific frequency bands. Because input excitations in civil structures are difficult to measure, damage indicators based on the coherence between two response signals have been developed. These indicators have shown promise in detecting nonlinear behavior in structures that were initially linear. This paper proposes a new damage indicator based on Mutual Information, a nonlinear extension of the squared correlation coefficient, to quantify the similarity between two signals without making assumptions about the nature of their interactions or the underlying dynamics of the system. Mutual Information is distinguished from other nonlinear similarity metrics due to its ability to capture all types of nonlinear dependencies, its high computational efficiency, and its invariance to invertible transformations, such as scaling. The proposed approach is demonstrated using a standard dataset containing experimental data from a three-story aluminum frame structure under 17 different damage states. The results show that the proposed metric can detect deviations from the baseline state due to changes in mass, stiffness, or newly induced nonlinear behavior, suggesting its potential for monitoring changes in the structural system.</description>
	<pubDate>2025-07-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 34: Beyond Correlation: Mutual Information to Detect Damage in Nonlinear Systems</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/34">doi: 10.3390/signals6030034</a></p>
	<p>Authors:
		Jale Tezcan
		Claudia Marin-Artieda
		</p>
	<p>Analyzing and measuring the similarity between two signals is a common task in many vibration-based structural health monitoring applications. Coherence between input and response signals serves as a convenient indicator of damage, based on the premise that nonlinearity due to damage in a linear system manifests as a loss of coherence in specific frequency bands. Because input excitations in civil structures are difficult to measure, damage indicators based on the coherence between two response signals have been developed. These indicators have shown promise in detecting nonlinear behavior in structures that were initially linear. This paper proposes a new damage indicator based on Mutual Information, a nonlinear extension of the squared correlation coefficient, to quantify the similarity between two signals without making assumptions about the nature of their interactions or the underlying dynamics of the system. Mutual Information is distinguished from other nonlinear similarity metrics due to its ability to capture all types of nonlinear dependencies, its high computational efficiency, and its invariance to invertible transformations, such as scaling. The proposed approach is demonstrated using a standard dataset containing experimental data from a three-story aluminum frame structure under 17 different damage states. The results show that the proposed metric can detect deviations from the baseline state due to changes in mass, stiffness, or newly induced nonlinear behavior, suggesting its potential for monitoring changes in the structural system.</p>
	]]></content:encoded>

	<dc:title>Beyond Correlation: Mutual Information to Detect Damage in Nonlinear Systems</dc:title>
			<dc:creator>Jale Tezcan</dc:creator>
			<dc:creator>Claudia Marin-Artieda</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030034</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-21</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/signals6030034</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/33">

	<title>Signals, Vol. 6, Pages 33: Performance of G3-PLC Channel in the Presence of Spread Spectrum Modulated Electromagnetic Interference</title>
	<link>https://www.mdpi.com/2624-6120/6/3/33</link>
	<description>Power converters in the smart grid systems are essential to link renewable energy sources with all grid appliances and equipment. However, this raises the possibility of electromagnetic interference (EMI) between the smart grid elements. Hence, spread spectrum (SS) modulation techniques have been used to mitigate the EMI peaks generated from the power converters. Consequently, the performance of the nearby communication systems is affected under the presence of EMI, which is not covered in many situations. In this paper, the behavior of the G3 Power Line Communication (PLC) channel is evaluated in terms of the Shannon&amp;ndash;Hartley equation in the presence of SS-modulated EMI from a buck converter. The SS-modulation technique used is the Random Carrier Frequency Modulation with Constant Duty cycle (RCFMFD). Moreover, the analysis is validated by experimental results obtained with a test setup reproducing the parasitic coupling between the PLC system and the power converter.</description>
	<pubDate>2025-07-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 33: Performance of G3-PLC Channel in the Presence of Spread Spectrum Modulated Electromagnetic Interference</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/33">doi: 10.3390/signals6030033</a></p>
	<p>Authors:
		Waseem ElSayed
		Amr Madi
		Piotr Lezynski
		Robert Smolenski
		Paolo Crovetti
		</p>
	<p>Power converters in the smart grid systems are essential to link renewable energy sources with all grid appliances and equipment. However, this raises the possibility of electromagnetic interference (EMI) between the smart grid elements. Hence, spread spectrum (SS) modulation techniques have been used to mitigate the EMI peaks generated from the power converters. Consequently, the performance of the nearby communication systems is affected under the presence of EMI, which is not covered in many situations. In this paper, the behavior of the G3 Power Line Communication (PLC) channel is evaluated in terms of the Shannon&ndash;Hartley equation in the presence of SS-modulated EMI from a buck converter. The SS-modulation technique used is the Random Carrier Frequency Modulation with Constant Duty cycle (RCFMFD). Moreover, the analysis is validated by experimental results obtained with a test setup reproducing the parasitic coupling between the PLC system and the power converter.</p>
	]]></content:encoded>

	<dc:title>Performance of G3-PLC Channel in the Presence of Spread Spectrum Modulated Electromagnetic Interference</dc:title>
			<dc:creator>Waseem ElSayed</dc:creator>
			<dc:creator>Amr Madi</dc:creator>
			<dc:creator>Piotr Lezynski</dc:creator>
			<dc:creator>Robert Smolenski</dc:creator>
			<dc:creator>Paolo Crovetti</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030033</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-17</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-17</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/signals6030033</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/32">

	<title>Signals, Vol. 6, Pages 32: Power&amp;ndash;Cadence Relationships in Cycling: Building Models from a Limited Number of Data Points</title>
	<link>https://www.mdpi.com/2624-6120/6/3/32</link>
	<description>Accurate modeling of the power&amp;ndash;cadence relationship is essential for assessing maximal anaerobic power (Pmax) of the lower limbs. Experimental data points from Force&amp;ndash;Velocity tests during cycling do not always reflect the maximal and cadence-specific power individuals can produce. The quality of the models and the accuracy of Pmax estimation is potentially compromised by the inclusion of non-maximal data points. This study evaluated a novel residual-based filtering method that selects five strategically located, maximal data points to improve model fit and Pmax prediction. Twenty-three recreationally active male participants (age: 26 &amp;plusmn; 5 years; height: 178 &amp;plusmn; 5 cm; body mass: 73 &amp;plusmn; 11 kg) completed a Force&amp;ndash;Velocity test consisting of multiple maximal cycling efforts on a stationary ergometer. Power and cadence data were used to generate third-order polynomial models: from all data points (High Number, HN), from the highest power value in each 5-RPM interval (Moderate Number, MN), and from five selected data points (Low Number, LN). The LN model yielded the best goodness of fit (R2 = 0.995 &amp;plusmn; 0.008; SEE = 29 &amp;plusmn; 15 W), the most accurate estimates of experimentally measured peak power (mean absolute percentage error = 1.45%), and the highest Pmax values (1220 &amp;plusmn; 168 W). Selecting a limited number of maximal data points improves the modeling of individual power&amp;ndash;cadence relationships and Pmax assessment.</description>
	<pubDate>2025-07-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 32: Power&amp;ndash;Cadence Relationships in Cycling: Building Models from a Limited Number of Data Points</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/32">doi: 10.3390/signals6030032</a></p>
	<p>Authors:
		David M. Rouffet
		Briar L. Rudsits
		Michael W. Daniels
		Temi Ariyo
		Christophe A. Hautier
		</p>
	<p>Accurate modeling of the power&ndash;cadence relationship is essential for assessing maximal anaerobic power (Pmax) of the lower limbs. Experimental data points from Force&ndash;Velocity tests during cycling do not always reflect the maximal and cadence-specific power individuals can produce. The quality of the models and the accuracy of Pmax estimation is potentially compromised by the inclusion of non-maximal data points. This study evaluated a novel residual-based filtering method that selects five strategically located, maximal data points to improve model fit and Pmax prediction. Twenty-three recreationally active male participants (age: 26 &plusmn; 5 years; height: 178 &plusmn; 5 cm; body mass: 73 &plusmn; 11 kg) completed a Force&ndash;Velocity test consisting of multiple maximal cycling efforts on a stationary ergometer. Power and cadence data were used to generate third-order polynomial models: from all data points (High Number, HN), from the highest power value in each 5-RPM interval (Moderate Number, MN), and from five selected data points (Low Number, LN). The LN model yielded the best goodness of fit (R2 = 0.995 &plusmn; 0.008; SEE = 29 &plusmn; 15 W), the most accurate estimates of experimentally measured peak power (mean absolute percentage error = 1.45%), and the highest Pmax values (1220 &plusmn; 168 W). Selecting a limited number of maximal data points improves the modeling of individual power&ndash;cadence relationships and Pmax assessment.</p>
	]]></content:encoded>

	<dc:title>Power&amp;ndash;Cadence Relationships in Cycling: Building Models from a Limited Number of Data Points</dc:title>
			<dc:creator>David M. Rouffet</dc:creator>
			<dc:creator>Briar L. Rudsits</dc:creator>
			<dc:creator>Michael W. Daniels</dc:creator>
			<dc:creator>Temi Ariyo</dc:creator>
			<dc:creator>Christophe A. Hautier</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030032</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-10</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-10</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/signals6030032</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/31">

	<title>Signals, Vol. 6, Pages 31: A Framework for Bluetooth-Based Real-Time Audio Data Acquisition in Mobile Robotics</title>
	<link>https://www.mdpi.com/2624-6120/6/3/31</link>
	<description>This paper presents a novel framework addressing the fundamental challenge of concurrent real-time audio acquisition and motor control in resource-constrained mobile robotics. The ESP32-based system integrates a digital MEMS microphone with rover mobility through a unified Bluetooth protocol. Key innovations include (1) a dual-thread architecture enabling non-blocking concurrent operation, (2) an adaptive eight-bit compression algorithm optimizing bandwidth while preserving audio quality, and (3) a mathematical model for real-time resource allocation. A comprehensive empirical evaluation demonstrates consistent control latency below 150 ms with 90&amp;ndash;95% audio packet delivery rates across varied environments. The framework enables mobile acoustic sensing applications while maintaining responsive motor control, validated through comprehensive testing in 40&amp;ndash;85 dB acoustic environments at distances up to 10 m. A performance analysis demonstrates the feasibility of high-fidelity mobile acoustic sensing on embedded platforms, opening new possibilities for environmental monitoring, surveillance, and autonomous acoustic exploration systems.</description>
	<pubDate>2025-07-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 31: A Framework for Bluetooth-Based Real-Time Audio Data Acquisition in Mobile Robotics</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/31">doi: 10.3390/signals6030031</a></p>
	<p>Authors:
		Sandeep Gupta
		Udit Mamodiya
		A. K. M. Zakir Hossain
		Ahmed J. A. Al-Gburi
		</p>
	<p>This paper presents a novel framework addressing the fundamental challenge of concurrent real-time audio acquisition and motor control in resource-constrained mobile robotics. The ESP32-based system integrates a digital MEMS microphone with rover mobility through a unified Bluetooth protocol. Key innovations include (1) a dual-thread architecture enabling non-blocking concurrent operation, (2) an adaptive eight-bit compression algorithm optimizing bandwidth while preserving audio quality, and (3) a mathematical model for real-time resource allocation. A comprehensive empirical evaluation demonstrates consistent control latency below 150 ms with 90&ndash;95% audio packet delivery rates across varied environments. The framework enables mobile acoustic sensing applications while maintaining responsive motor control, validated through comprehensive testing in 40&ndash;85 dB acoustic environments at distances up to 10 m. A performance analysis demonstrates the feasibility of high-fidelity mobile acoustic sensing on embedded platforms, opening new possibilities for environmental monitoring, surveillance, and autonomous acoustic exploration systems.</p>
	]]></content:encoded>

	<dc:title>A Framework for Bluetooth-Based Real-Time Audio Data Acquisition in Mobile Robotics</dc:title>
			<dc:creator>Sandeep Gupta</dc:creator>
			<dc:creator>Udit Mamodiya</dc:creator>
			<dc:creator>A. K. M. Zakir Hossain</dc:creator>
			<dc:creator>Ahmed J. A. Al-Gburi</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030031</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-07-02</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-07-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/signals6030031</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/3/30">

	<title>Signals, Vol. 6, Pages 30: Multi-Instance Multi-Scale Graph Attention Neural Net with Label Semantic Embeddings for Instrument Recognition</title>
	<link>https://www.mdpi.com/2624-6120/6/3/30</link>
	<description>Instrument recognition is a crucial aspect of music information retrieval, and in recent years, machine learning-based methods have become the primary approach to addressing this challenge. However, existing models often struggle to accurately identify multiple instruments within music tracks that vary in length and quality. One key issue is that the instruments of interest may not appear in every clip of the audio sample, and when they do, they are often unevenly distributed across different sections of the track. Additionally, in polyphonic music, multiple instruments are often played simultaneously, leading to signal overlap. Using the same overlapping audio signals as partial classification features for different instruments will reduce the distinguishability of features between instruments, thereby affecting the performance of instrument recognition. These complexities present significant challenges for current instrument recognition models. Therefore, this paper proposes a multi-instance multi-scale graph attention neural network (MMGAT) with label semantic embeddings for instrument recognition. MMGAT designs an instance correlation graph to model the presence and quantitative timbre similarity of instruments at different positions from the perspective of multi-instance learning. Then, to enhance the distinguishability of signals after the overlap of different instruments and improve classification accuracy, MMGAT learns semantic information from the labels of different instruments as embeddings and incorporates them into the overlapping audio signal features, thereby enhancing the differentiability of audio features for various instruments. MMGAT then designs an instance-based multi-instance multi-scale graph attention neural network to recognize different instruments based on the instance correlation graphs and label semantic embeddings. 
The effectiveness of MMGAT is validated through experiments and compared to commonly used instrument recognition models. The experimental results demonstrate that MMGAT outperforms existing approaches in instrument recognition tasks.</description>
	<pubDate>2025-06-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 30: Multi-Instance Multi-Scale Graph Attention Neural Net with Label Semantic Embeddings for Instrument Recognition</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/3/30">doi: 10.3390/signals6030030</a></p>
	<p>Authors:
		Na Bai
		Zhaoli Wu
		Jian Zhang
		</p>
	<p>Instrument recognition is a crucial aspect of music information retrieval, and in recent years, machine learning-based methods have become the primary approach to addressing this challenge. However, existing models often struggle to accurately identify multiple instruments within music tracks that vary in length and quality. One key issue is that the instruments of interest may not appear in every clip of the audio sample, and when they do, they are often unevenly distributed across different sections of the track. Additionally, in polyphonic music, multiple instruments are often played simultaneously, leading to signal overlap. Using the same overlapping audio signals as partial classification features for different instruments will reduce the distinguishability of features between instruments, thereby affecting the performance of instrument recognition. These complexities present significant challenges for current instrument recognition models. Therefore, this paper proposes a multi-instance multi-scale graph attention neural network (MMGAT) with label semantic embeddings for instrument recognition. MMGAT designs an instance correlation graph to model the presence and quantitative timbre similarity of instruments at different positions from the perspective of multi-instance learning. Then, to enhance the distinguishability of signals after the overlap of different instruments and improve classification accuracy, MMGAT learns semantic information from the labels of different instruments as embeddings and incorporates them into the overlapping audio signal features, thereby enhancing the differentiability of audio features for various instruments. MMGAT then designs an instance-based multi-instance multi-scale graph attention neural network to recognize different instruments based on the instance correlation graphs and label semantic embeddings. 
The effectiveness of MMGAT is validated through experiments and compared to commonly used instrument recognition models. The experimental results demonstrate that MMGAT outperforms existing approaches in instrument recognition tasks.</p>
	]]></content:encoded>

	<dc:title>Multi-Instance Multi-Scale Graph Attention Neural Net with Label Semantic Embeddings for Instrument Recognition</dc:title>
			<dc:creator>Na Bai</dc:creator>
			<dc:creator>Zhaoli Wu</dc:creator>
			<dc:creator>Jian Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/signals6030030</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-06-24</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-06-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/signals6030030</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/3/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/29">

	<title>Signals, Vol. 6, Pages 29: Comparative Analysis of Attention Mechanisms in Densely Connected Network for Network Traffic Prediction</title>
	<link>https://www.mdpi.com/2624-6120/6/2/29</link>
	<description>Recently, STDenseNet (SpatioTemporal Densely connected convolutional Network) showed remarkable performance in predicting network traffic by leveraging the inductive bias of convolution layers. However, it is known that such convolution layers can only barely capture long-term spatial and temporal dependencies. To solve this problem, we propose Attention-DenseNet (ADNet), which effectively incorporates an attention module into STDenseNet to learn representations for long-term spatio-temporal patterns. Specifically, we explored the optimal positions and the types of attention modules in combination with STDenseNet. Our key findings are as follows: i) attention modules are very effective when positioned between the last dense module and the final feature fusion module, meaning that the attention module plays a key role in aggregating low-level local features with long-term dependency. Hence, the final feature fusion module can easily exploit both global and local information; ii) the best attention module is different depending on the spatio-temporal characteristics of the dataset. To verify the effectiveness of the proposed ADNet, we performed experiments on the Telecom Italia dataset, a well-known benchmark dataset for network traffic prediction. The experimental results show that, compared to STDenseNet, our ADNet improved RMSE performance by 3.72%, 2.84%, and 5.87% in call service (Call), short message service (SMS), and Internet access (Internet) sub-datasets, respectively.</description>
	<pubDate>2025-06-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 29: Comparative Analysis of Attention Mechanisms in Densely Connected Network for Network Traffic Prediction</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/29">doi: 10.3390/signals6020029</a></p>
	<p>Authors:
		Myeongjun Oh
		Sung Oh
		Jongkyung Im
		Myungho Kim
		Joung-Sik Kim
		Ji-Yeon Park
		Na-Rae Yi
		Sung-Ho Bae
		</p>
	<p>Recently, STDenseNet (SpatioTemporal Densely connected convolutional Network) showed remarkable performance in predicting network traffic by leveraging the inductive bias of convolution layers. However, it is known that such convolution layers can only barely capture long-term spatial and temporal dependencies. To solve this problem, we propose Attention-DenseNet (ADNet), which effectively incorporates an attention module into STDenseNet to learn representations for long-term spatio-temporal patterns. Specifically, we explored the optimal positions and the types of attention modules in combination with STDenseNet. Our key findings are as follows: i) attention modules are very effective when positioned between the last dense module and the final feature fusion module, meaning that the attention module plays a key role in aggregating low-level local features with long-term dependency. Hence, the final feature fusion module can easily exploit both global and local information; ii) the best attention module is different depending on the spatio-temporal characteristics of the dataset. To verify the effectiveness of the proposed ADNet, we performed experiments on the Telecom Italia dataset, a well-known benchmark dataset for network traffic prediction. The experimental results show that, compared to STDenseNet, our ADNet improved RMSE performance by 3.72%, 2.84%, and 5.87% in call service (Call), short message service (SMS), and Internet access (Internet) sub-datasets, respectively.</p>
	]]></content:encoded>

	<dc:title>Comparative Analysis of Attention Mechanisms in Densely Connected Network for Network Traffic Prediction</dc:title>
			<dc:creator>Myeongjun Oh</dc:creator>
			<dc:creator>Sung Oh</dc:creator>
			<dc:creator>Jongkyung Im</dc:creator>
			<dc:creator>Myungho Kim</dc:creator>
			<dc:creator>Joung-Sik Kim</dc:creator>
			<dc:creator>Ji-Yeon Park</dc:creator>
			<dc:creator>Na-Rae Yi</dc:creator>
			<dc:creator>Sung-Ho Bae</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020029</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-06-19</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-06-19</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/signals6020029</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/28">

	<title>Signals, Vol. 6, Pages 28: Vector Signals and Invariant Systems: Re-Tooling Linear Systems Theory</title>
	<link>https://www.mdpi.com/2624-6120/6/2/28</link>
	<description>In a previous work, we identified the importance of rotation invariance in the standard complex-valued theory of linear time-invariant (LTI) systems and proposed a generalized vector-valued (VV) definition of convolution that reinterprets the complex-valued product of the traditional formalism as a scale rotation within the framework of geometric algebra. Based on this convolution definition, we characterized linear rotation-invariant time-invariant (LRITI) systems by defining and using a VV impulse response—effectively generalizing time-domain analysis for VV signals and LRITI systems. In this work, we provide a compatible frequency-domain analysis for VV signals and LRITI systems. First, VV bivector exponential signals are shown to be eigenfunctions of LRITI systems. A Fourier transform is defined, and we propose two generalized definitions of frequency response: the first valid for bivector exponentials in an arbitrary plane and the second valid for a general signal decomposed into a set of totally orthogonal planes (TOPs). Finally, we establish a convolution property for the Fourier transform with respect to TOPs. Together, these results provide compatible time-domain and frequency-domain analyses, thereby enabling a more comprehensive analysis of VV signals and LRITI systems.</description>
	<pubDate>2025-06-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 28: Vector Signals and Invariant Systems: Re-Tooling Linear Systems Theory</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/28">doi: 10.3390/signals6020028</a></p>
	<p>Authors:
		Mamta Dalal
		Steven Sandoval
		</p>
	<p>In a previous work, we identified the importance of rotation invariance in the standard complex-valued theory of linear time-invariant (LTI) systems and proposed a generalized vector-valued (VV) definition of convolution that reinterprets the complex-valued product of the traditional formalism as a scale rotation within the framework of geometric algebra. Based on this convolution definition, we characterized linear rotation-invariant time-invariant (LRITI) systems by defining and using a VV impulse response&mdash;effectively generalizing time-domain analysis for VV signals and LRITI systems. In this work, we provide a compatible frequency-domain analysis for VV signals and LRITI systems. First, VV bivector exponential signals are shown to be eigenfunctions of LRITI systems. A Fourier transform is defined, and we propose two generalized definitions of frequency response: the first valid for bivector exponentials in an arbitrary plane and the second valid for a general signal decomposed into a set of totally orthogonal planes (TOPs). Finally, we establish a convolution property for the Fourier transform with respect to TOPs. Together, these results provide compatible time-domain and frequency-domain analyses, thereby enabling a more comprehensive analysis of VV signals and LRITI systems.</p>
	]]></content:encoded>

	<dc:title>Vector Signals and Invariant Systems: Re-Tooling Linear Systems Theory</dc:title>
			<dc:creator>Mamta Dalal</dc:creator>
			<dc:creator>Steven Sandoval</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020028</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-06-18</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-06-18</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/signals6020028</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/27">

	<title>Signals, Vol. 6, Pages 27: Noise Impact Analysis of School Environments Based on the Deployment of IoT Sensor Nodes</title>
	<link>https://www.mdpi.com/2624-6120/6/2/27</link>
	<description>This work presents an on-field noise analysis during the class breaks in Greek school units (a high school and a senior high school) based on the design and deployment of low-cost IoT sensor nodes and IoT platforms. The course breaks form 20% of a regular school day, during which intense mobility and high noise levels usually evolve. Indoor noise levels, along with environmental conditions, have been measured through a wireless network that comprises IoT nodes that integrate humidity, temperature, and acoustic level sensors. PM10 and PM2.5 values have also been acquired through data sensors located nearby the school complex. School buildings that have been recently renovated for minimizing their energy footprint and CO2 emissions have been selected in comparison with similar works in academia. The data are collected, shipped, and stored into a time-series database in cloud facilities where an IoT platform has been developed for processing and analysis purposes. The findings show that low-cost sensors can efficiently monitor noise levels after proper adjustments. Additionally, the statistical evaluation of the received sensor measurements has indicated that ubiquitous high noise levels during the course breaks potentially affect teachers’ leisure time, despite the thermal isolation of the facilities. Within this context, we prove that the proposed IoT Sensor Network could form a tool to essentially monitor school infrastructures and thus to prompt for improvements regarding the building facilities. Several guides to further mitigate noise and achieve high-quality levels in learning institutes are also described.</description>
	<pubDate>2025-06-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 27: Noise Impact Analysis of School Environments Based on the Deployment of IoT Sensor Nodes</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/27">doi: 10.3390/signals6020027</a></p>
	<p>Authors:
		Georgios Dimitriou
		Fotios Gioulekas
		</p>
	<p>This work presents an on-field noise analysis during the class breaks in Greek school units (a high school and a senior high school) based on the design and deployment of low-cost IoT sensor nodes and IoT platforms. The course breaks form 20% of a regular school day, during which intense mobility and high noise levels usually evolve. Indoor noise levels, along with environmental conditions, have been measured through a wireless network that comprises IoT nodes that integrate humidity, temperature, and acoustic level sensors. PM10 and PM2.5 values have also been acquired through data sensors located nearby the school complex. School buildings that have been recently renovated for minimizing their energy footprint and CO2 emissions have been selected in comparison with similar works in academia. The data are collected, shipped, and stored into a time-series database in cloud facilities where an IoT platform has been developed for processing and analysis purposes. The findings show that low-cost sensors can efficiently monitor noise levels after proper adjustments. Additionally, the statistical evaluation of the received sensor measurements has indicated that ubiquitous high noise levels during the course breaks potentially affect teachers&rsquo; leisure time, despite the thermal isolation of the facilities. Within this context, we prove that the proposed IoT Sensor Network could form a tool to essentially monitor school infrastructures and thus to prompt for improvements regarding the building facilities. Several guides to further mitigate noise and achieve high-quality levels in learning institutes are also described.</p>
	]]></content:encoded>

	<dc:title>Noise Impact Analysis of School Environments Based on the Deployment of IoT Sensor Nodes</dc:title>
			<dc:creator>Georgios Dimitriou</dc:creator>
			<dc:creator>Fotios Gioulekas</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020027</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-06-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-06-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/signals6020027</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/26">

	<title>Signals, Vol. 6, Pages 26: Self Attention-Driven ECG Denoising: A Transformer-Based Approach for Robust Cardiac Signal Enhancement</title>
	<link>https://www.mdpi.com/2624-6120/6/2/26</link>
	<description>The analysis of electrocardiogram (ECG) signals is profoundly affected by the presence of electromyographic (EMG) noise, which can lead to substantial misinterpretations in healthcare applications. To address this challenge, we present ECGDnet, an innovative architecture based on Transformer technology, specifically engineered to denoise multi-channel ECG signals. By leveraging multi-head self-attention mechanisms, positional embeddings, and an advanced sequence-to-sequence processing architecture, ECGDnet effectively captures both local and global temporal dependencies inherent in cardiac signals. Experimental validation on real-world datasets demonstrates ECGDnet’s remarkable efficacy in noise suppression, achieving a Signal-to-Noise Ratio (SNR) of 19.83, a Normalized Mean Squared Error (NMSE) of 0.9842, a Reconstruction Error (RE) of 0.0158, and a Pearson Correlation Coefficient (PCC) of 0.9924. These results represent significant improvements from traditional deep learning approaches while maintaining complex signal morphology and effectively mitigating noise interference.</description>
	<pubDate>2025-06-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 26: Self Attention-Driven ECG Denoising: A Transformer-Based Approach for Robust Cardiac Signal Enhancement</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/26">doi: 10.3390/signals6020026</a></p>
	<p>Authors:
		Aymane Edder
		Fatima-Ezzahraa Ben-Bouazza
		Idriss Tafala
		Oumaima Manchadi
		Bassma Jioudi
		</p>
	<p>The analysis of electrocardiogram (ECG) signals is profoundly affected by the presence of electromyographic (EMG) noise, which can lead to substantial misinterpretations in healthcare applications. To address this challenge, we present ECGDnet, an innovative architecture based on Transformer technology, specifically engineered to denoise multi-channel ECG signals. By leveraging multi-head self-attention mechanisms, positional embeddings, and an advanced sequence-to-sequence processing architecture, ECGDnet effectively captures both local and global temporal dependencies inherent in cardiac signals. Experimental validation on real-world datasets demonstrates ECGDnet&rsquo;s remarkable efficacy in noise suppression, achieving a Signal-to-Noise Ratio (SNR) of 19.83, a Normalized Mean Squared Error (NMSE) of 0.9842, a Reconstruction Error (RE) of 0.0158, and a Pearson Correlation Coefficient (PCC) of 0.9924. These results represent significant improvements from traditional deep learning approaches while maintaining complex signal morphology and effectively mitigating noise interference.</p>
	]]></content:encoded>

	<dc:title>Self Attention-Driven ECG Denoising: A Transformer-Based Approach for Robust Cardiac Signal Enhancement</dc:title>
			<dc:creator>Aymane Edder</dc:creator>
			<dc:creator>Fatima-Ezzahraa Ben-Bouazza</dc:creator>
			<dc:creator>Idriss Tafala</dc:creator>
			<dc:creator>Oumaima Manchadi</dc:creator>
			<dc:creator>Bassma Jioudi</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020026</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-06-03</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-06-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/signals6020026</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/25">

	<title>Signals, Vol. 6, Pages 25: Voter Authentication Using Enhanced ResNet50 for Facial Recognition</title>
	<link>https://www.mdpi.com/2624-6120/6/2/25</link>
	<description>Electoral fraud, particularly multiple voting, undermines the integrity of democratic processes. To address this challenge, this study introduces an innovative facial recognition system that integrates an enhanced 50-layer Residual Network (ResNet50) architecture with Additive Angular Margin Loss (ArcFace) and Multi-Task Cascaded Convolutional Neural Networks (MTCNN) for face detection. Using the Mahalanobis distance, the system verifies voter identities by comparing captured facial images with previously recorded biometric features. Extensive evaluations demonstrate the methodology’s effectiveness, achieving a facial recognition accuracy of 99.85%. This significant improvement over existing baseline methods has the potential to enhance electoral transparency and prevent multiple voting. The findings contribute to developing robust biometric-based electoral systems, thereby promoting democratic trust and accountability.</description>
	<pubDate>2025-05-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 25: Voter Authentication Using Enhanced ResNet50 for Facial Recognition</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/25">doi: 10.3390/signals6020025</a></p>
	<p>Authors:
		Aminou Halidou
		Daniel Georges Olle Olle
		Arnaud Nguembang Fadja
		Daramy Vandi Von Kallon
		Tchana Ngninkeu Gil Thibault
		</p>
	<p>Electoral fraud, particularly multiple voting, undermines the integrity of democratic processes. To address this challenge, this study introduces an innovative facial recognition system that integrates an enhanced 50-layer Residual Network (ResNet50) architecture with Additive Angular Margin Loss (ArcFace) and Multi-Task Cascaded Convolutional Neural Networks (MTCNN) for face detection. Using the Mahalanobis distance, the system verifies voter identities by comparing captured facial images with previously recorded biometric features. Extensive evaluations demonstrate the methodology&rsquo;s effectiveness, achieving a facial recognition accuracy of 99.85%. This significant improvement over existing baseline methods has the potential to enhance electoral transparency and prevent multiple voting. The findings contribute to developing robust biometric-based electoral systems, thereby promoting democratic trust and accountability.</p>
	]]></content:encoded>

	<dc:title>Voter Authentication Using Enhanced ResNet50 for Facial Recognition</dc:title>
			<dc:creator>Aminou Halidou</dc:creator>
			<dc:creator>Daniel Georges Olle Olle</dc:creator>
			<dc:creator>Arnaud Nguembang Fadja</dc:creator>
			<dc:creator>Daramy Vandi Von Kallon</dc:creator>
			<dc:creator>Tchana Ngninkeu Gil Thibault</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020025</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-05-23</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-05-23</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/signals6020025</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/24">

	<title>Signals, Vol. 6, Pages 24: User Visit Certification and Visit Trace System Using Inaudible Frequency</title>
	<link>https://www.mdpi.com/2624-6120/6/2/24</link>
	<description>This study proposes a user visit certification and visit trace system using inaudible frequencies in the range of audible frequencies but not those audible to people. The signal frequency consists of inaudible frequencies in the range of 18 kHz to 20 kHz, which can be generated by normal speakers. This system recognizes the signal frequency and sends signal values, users’ IDs, and location information to a server to certify the current user’s location. The server categorizes and stores the user’s visit history by individual, and the user can check their personal visit trace information in the application. To verify the utility of the proposed system, we developed an application for user certification and tracing based on a smart device and a built server system. We conducted user certification and trace experiments using the proposed system, resulting in 99.6% accuracy. As a comparative experiment, we conducted a visit certification experiment using a QR code and the proposed system and found that the proposed system performed better. Thus, the proposed system will be a useful technology for epidemiological surveys for individual users and electronic entry lists to restaurants and facilities in the age of COVID-19.</description>
	<pubDate>2025-05-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 24: User Visit Certification and Visit Trace System Using Inaudible Frequency</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/24">doi: 10.3390/signals6020024</a></p>
	<p>Authors:
		Myoungbeom Chung
		</p>
	<p>This study proposes a user visit certification and visit trace system using inaudible frequencies in the range of audible frequencies but not those audible to people. The signal frequency consists of inaudible frequencies in the range of 18 kHz to 20 kHz, which can be generated by normal speakers. This system recognizes the signal frequency and sends signal values, users&rsquo; IDs, and location information to a server to certify the current user&rsquo;s location. The server categorizes and stores the user&rsquo;s visit history by individual, and the user can check their personal visit trace information in the application. To verify the utility of the proposed system, we developed an application for user certification and tracing based on a smart device and a built server system. We conducted user certification and trace experiments using the proposed system, resulting in 99.6% accuracy. As a comparative experiment, we conducted a visit certification experiment using a QR code and the proposed system and found that the proposed system performed better. Thus, the proposed system will be a useful technology for epidemiological surveys for individual users and electronic entry lists to restaurants and facilities in the age of COVID-19.</p>
	]]></content:encoded>

	<dc:title>User Visit Certification and Visit Trace System Using Inaudible Frequency</dc:title>
			<dc:creator>Myoungbeom Chung</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020024</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-05-15</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-05-15</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/signals6020024</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/23">

	<title>Signals, Vol. 6, Pages 23: The Fast Discrete Tchebichef Transform Algorithms for Short-Length Input Sequences</title>
	<link>https://www.mdpi.com/2624-6120/6/2/23</link>
	<description>In this article, the fast algorithms for the discrete Tchebichef transform (DTT) are proposed for input sequences of lengths in the range from 3 to 8. At present, DTT is widely applied in signal processing, image compression, and video coding. The review of the articles related to fast DTT algorithms has shown that such algorithms are mainly developed for input signal lengths 4 and 8. However, several problems exist for which signal and image processing with different apertures is required. To avoid this shortcoming, the structural approach and a sparse matrix factorization are applied in this paper to develop fast real DTT algorithms for short-length input signals. According to the structural approach, the rows and columns of the transform matrix are rearranged, possibly by changing the signs of some rows or columns. Next, the matched submatrix templates are extracted from the matrix structure and decomposed into a matrix product to construct the factorization of an initial matrix. A sparse matrix factorization assumes that the butterfly architecture can be extracted from the transform matrix. Combining the structural approach with a sparse matrix factorization, we obtained the matrix representation with reduced computational complexity. Based on the obtained matrix representation, the fast algorithms were developed for the real DTT via the data flow graphs. The fast algorithms for integer DTT can be easily obtained using the constructed data flow graphs. To confirm the correctness of the designed algorithms, the MATLAB R2023b software was applied. The constructed factorizations of the real DTT matrices reduce the number of multiplication operations by 78% on average compared to the direct matrix-vector product at signal lengths in the range from 3 to 8. The number of additions decreased by 5% on average within the same signal length range.</description>
	<pubDate>2025-05-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 23: The Fast Discrete Tchebichef Transform Algorithms for Short-Length Input Sequences</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/23">doi: 10.3390/signals6020023</a></p>
	<p>Authors:
		Aleksandr Cariow
		Marina Polyakova
		</p>
	<p>In this article, the fast algorithms for the discrete Tchebichef transform (DTT) are proposed for input sequences of lengths in the range from 3 to 8. At present, DTT is widely applied in signal processing, image compression, and video coding. The review of the articles related to fast DTT algorithms has shown that such algorithms are mainly developed for input signal lengths 4 and 8. However, several problems exist for which signal and image processing with different apertures is required. To avoid this shortcoming, the structural approach and a sparse matrix factorization are applied in this paper to develop fast real DTT algorithms for short-length input signals. According to the structural approach, the rows and columns of the transform matrix are rearranged, possibly by changing the signs of some rows or columns. Next, the matched submatrix templates are extracted from the matrix structure and decomposed into a matrix product to construct the factorization of an initial matrix. A sparse matrix factorization assumes that the butterfly architecture can be extracted from the transform matrix. Combining the structural approach with a sparse matrix factorization, we obtained the matrix representation with reduced computational complexity. Based on the obtained matrix representation, the fast algorithms were developed for the real DTT via the data flow graphs. The fast algorithms for integer DTT can be easily obtained using the constructed data flow graphs. To confirm the correctness of the designed algorithms, the MATLAB R2023b software was applied. The constructed factorizations of the real DTT matrices reduce the number of multiplication operations by 78% on average compared to the direct matrix-vector product at signal lengths in the range from 3 to 8. The number of additions decreased by 5% on average within the same signal length range.</p>
	]]></content:encoded>

	<dc:title>The Fast Discrete Tchebichef Transform Algorithms for Short-Length Input Sequences</dc:title>
			<dc:creator>Aleksandr Cariow</dc:creator>
			<dc:creator>Marina Polyakova</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020023</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-05-09</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-05-09</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/signals6020023</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/22">

	<title>Signals, Vol. 6, Pages 22: Speech Emotion Recognition: Comparative Analysis of CNN-LSTM and Attention-Enhanced CNN-LSTM Models</title>
	<link>https://www.mdpi.com/2624-6120/6/2/22</link>
	<description>Speech Emotion Recognition (SER) technology helps computers understand human emotions in speech, which fills a critical niche in advancing human&amp;ndash;computer interaction and mental health diagnostics. The primary objective of this study is to enhance SER accuracy and generalization through innovative deep learning models. Despite its importance in various fields like human&amp;ndash;computer interaction and mental health diagnosis, accurately identifying emotions from speech can be challenging due to differences in speakers, accents, and background noise. The work proposes two innovative deep learning models to improve SER accuracy: a CNN-LSTM model and an Attention-Enhanced CNN-LSTM model. These models were tested on the Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS), collected between 2015 and 2018, which comprises 1440 audio files of male and female actors expressing eight emotions. Both models achieved impressive accuracy rates of over 96% in classifying emotions into eight categories. By comparing the CNN-LSTM and Attention-Enhanced CNN-LSTM models, this study offers comparative insights into modeling techniques, contributes to the development of more effective emotion recognition systems, and offers practical implications for real-time applications in healthcare and customer service.</description>
	<pubDate>2025-05-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 22: Speech Emotion Recognition: Comparative Analysis of CNN-LSTM and Attention-Enhanced CNN-LSTM Models</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/22">doi: 10.3390/signals6020022</a></p>
	<p>Authors:
		Jamsher Bhanbhro
		Asif Aziz Memon
		Bharat Lal
		Shahnawaz Talpur
		Madeha Memon
		</p>
	<p>Speech Emotion Recognition (SER) technology helps computers understand human emotions in speech, which fills a critical niche in advancing human&ndash;computer interaction and mental health diagnostics. The primary objective of this study is to enhance SER accuracy and generalization through innovative deep learning models. Despite its importance in various fields like human&ndash;computer interaction and mental health diagnosis, accurately identifying emotions from speech can be challenging due to differences in speakers, accents, and background noise. The work proposes two innovative deep learning models to improve SER accuracy: a CNN-LSTM model and an Attention-Enhanced CNN-LSTM model. These models were tested on the Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS), collected between 2015 and 2018, which comprises 1440 audio files of male and female actors expressing eight emotions. Both models achieved impressive accuracy rates of over 96% in classifying emotions into eight categories. By comparing the CNN-LSTM and Attention-Enhanced CNN-LSTM models, this study offers comparative insights into modeling techniques, contributes to the development of more effective emotion recognition systems, and offers practical implications for real-time applications in healthcare and customer service.</p>
	]]></content:encoded>

	<dc:title>Speech Emotion Recognition: Comparative Analysis of CNN-LSTM and Attention-Enhanced CNN-LSTM Models</dc:title>
			<dc:creator>Jamsher Bhanbhro</dc:creator>
			<dc:creator>Asif Aziz Memon</dc:creator>
			<dc:creator>Bharat Lal</dc:creator>
			<dc:creator>Shahnawaz Talpur</dc:creator>
			<dc:creator>Madeha Memon</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020022</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-05-09</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-05-09</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/signals6020022</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-6120/6/2/21">

	<title>Signals, Vol. 6, Pages 21: Structural Monitoring of a Drawbridge in Operation: Signal Analysis</title>
	<link>https://www.mdpi.com/2624-6120/6/2/21</link>
	<description>Monitoring large critical infrastructures is a highly complex and costly task. The use of a network of sensors to aid in the detection and identification of potential anomalies is therefore an important step towards easing maintenance effort while maintaining operational soundness. To address this challenge, a monitoring system was developed and installed in a seaport drawbridge. The structural parameters monitored during operation can be used to assess the bridge&amp;rsquo;s structural behavior. This provides the ability to identify potential anomalies that could lead to its failure at an early stage, allowing for the better planning of maintenance interventions, saving time and money. In this paper, the monitoring system will be presented and the employed signal identification and analysis methods will be described.</description>
	<pubDate>2025-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Signals, Vol. 6, Pages 21: Structural Monitoring of a Drawbridge in Operation: Signal Analysis</b></p>
	<p>Signals <a href="https://www.mdpi.com/2624-6120/6/2/21">doi: 10.3390/signals6020021</a></p>
	<p>Authors:
		Pedro J. S. C. P. Sousa
		Susana Dias
		Nuno Viriato Ramos
		Job Santos Silva
		Mário A. P. Vaz
		Paulo J. Tavares
		Pedro M. G. P. Moreira
		</p>
	<p>Monitoring large critical infrastructures is a highly complex and costly task. The use of a network of sensors to aid in the detection and identification of potential anomalies is therefore an important step towards easing maintenance effort while maintaining operational soundness. To address this challenge, a monitoring system was developed and installed in a seaport drawbridge. The structural parameters monitored during operation can be used to assess the bridge&rsquo;s structural behavior. This provides the ability to identify potential anomalies that could lead to its failure at an early stage, allowing for the better planning of maintenance interventions, saving time and money. In this paper, the monitoring system will be presented and the employed signal identification and analysis methods will be described.</p>
	]]></content:encoded>

	<dc:title>Structural Monitoring of a Drawbridge in Operation: Signal Analysis</dc:title>
			<dc:creator>Pedro J. S. C. P. Sousa</dc:creator>
			<dc:creator>Susana Dias</dc:creator>
			<dc:creator>Nuno Viriato Ramos</dc:creator>
			<dc:creator>Job Santos Silva</dc:creator>
			<dc:creator>Mário A. P. Vaz</dc:creator>
			<dc:creator>Paulo J. Tavares</dc:creator>
			<dc:creator>Pedro M. G. P. Moreira</dc:creator>
		<dc:identifier>doi: 10.3390/signals6020021</dc:identifier>
	<dc:source>Signals</dc:source>
	<dc:date>2025-05-01</dc:date>

	<prism:publicationName>Signals</prism:publicationName>
	<prism:publicationDate>2025-05-01</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/signals6020021</prism:doi>
	<prism:url>https://www.mdpi.com/2624-6120/6/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
