<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/sensors">
		<title>Sensors</title>
		<description>Latest open access articles published in Sensors at https://www.mdpi.com/journal/sensors</description>
		<link>https://www.mdpi.com/journal/sensors</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/sensors"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
		<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021"/>
		<items>
			<rdf:Seq>
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2899" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2898" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2897" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2896" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2895" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2894" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2893" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2892" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2891" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2890" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2889" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2888" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2887" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2886" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2884" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2885" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2883" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2882" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2881" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2879" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2880" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2878" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2877" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2876" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2875" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2873" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2874" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2872" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2871" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2869" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2870" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2867" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2868" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2866" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2865" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2864" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2863" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2862" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2861" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2860" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2859" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2858" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2857" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2856" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2854" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2853" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2855" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2852" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2846" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2849" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2850" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2851" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2848" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2847" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2845" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2844" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2843" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2842" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2841" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2837" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2840" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2839" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2838" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2836" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2833" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2835" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2834" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2832" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2831" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2829" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2830" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2827" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2828" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2826" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2825" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2824" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2823" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2822" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2821" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2820" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2819" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2818" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2817" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2816" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2814" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2813" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2815" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2812" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2810" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2811" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2807" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2806" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2808" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2809" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2802" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2805" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2804" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2803" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2801" />
				<rdf:li rdf:resource="https://www.mdpi.com/1424-8220/26/9/2798" />
			</rdf:Seq>
		</items>
		<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2899">

	<title>Sensors, Vol. 26, Pages 2899: An Exploratory Analysis of Postural Control in People with Type 2 Diabetes Mellitus Using a Smartphone IMU Sensor</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2899</link>
	<description>Background: There is a growing need for highly accessible and simplified methods to track postural control in adults affected by neurodegenerative diseases. Therefore, the aim of this study was to assess the validity of smartphone-derived postural control analyses compared with traditional center-of-pressure (COP) measures in healthy adults and people with type 2 diabetes mellitus (T2DM). Methods: A total of 36 participants (21 controls, 15 T2DM) completed static postural testing during single- and double-leg stance, each with eyes open and eyes closed. A smartphone attached to the lower back measured trunk acceleration (SP-ACC) concurrently with gold-standard COP recordings. The root mean square (RMS) and movement velocity (MV) were extracted from both trunk acceleration and COP data. The effects of balance condition and group were evaluated using non-parametric statistical tests. Results: SP-ACC and COP metrics showed progressive sway increases with task difficulty in both groups (all p &amp;lt; 0.001). RMS-ACC demonstrated moderate-to-strong correlations with RMS-COP across conditions (r = 0.55&amp;ndash;0.90). Compared with controls, the T2DM group exhibited significantly higher RMS-ACC in DLS-EC and SLS-EO (both p &amp;lt; 0.01) and higher MV-ACC in DLS-EO, SLS-EO, and SLS-EC (p = 0.04&amp;ndash;&amp;lt;0.001), reflecting impaired postural control. Conclusions: Smartphone-based IMU assessments showed good agreement with COP analysis and detected condition-specific balance deficits in T2DM. These findings support smartphone-based IMU metrics as a promising tool for accessible and scalable balance screening in diabetes care.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2899: An Exploratory Analysis of Postural Control in People with Type 2 Diabetes Mellitus Using a Smartphone IMU Sensor</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2899">doi: 10.3390/s26092899</a></p>
	<p>Authors:
		Trine Rolighed Thomsen,
		Sophia Pölhöšová,
		Asger Ahlmann Bech,
		Aksayan Arunanthy Mahalingasivam,
		Nicklas Højgaard-Hessellund Rasmussen,
		Anderson Souza Oliveira
		</p>
	<p>Background: There is a growing need for highly accessible and simplified methods to track postural control in adults affected by neurodegenerative diseases. Therefore, the aim of this study was to assess the validity of smartphone-derived postural control analyses compared with traditional center-of-pressure (COP) measures in healthy adults and people with type 2 diabetes mellitus (T2DM). Methods: A total of 36 participants (21 controls, 15 T2DM) completed static postural testing during single- and double-leg stance, each with eyes open and eyes closed. A smartphone attached to the lower back measured trunk acceleration (SP-ACC) concurrently with gold-standard COP recordings. The root mean square (RMS) and movement velocity (MV) were extracted from both trunk acceleration and COP data. The effects of balance condition and group were evaluated using non-parametric statistical tests. Results: SP-ACC and COP metrics showed progressive sway increases with task difficulty in both groups (all p &lt; 0.001). RMS-ACC demonstrated moderate-to-strong correlations with RMS-COP across conditions (r = 0.55&ndash;0.90). Compared with controls, the T2DM group exhibited significantly higher RMS-ACC in DLS-EC and SLS-EO (both p &lt; 0.01) and higher MV-ACC in DLS-EO, SLS-EO, and SLS-EC (p = 0.04&ndash;&lt;0.001), reflecting impaired postural control. Conclusions: Smartphone-based IMU assessments showed good agreement with COP analysis and detected condition-specific balance deficits in T2DM. These findings support smartphone-based IMU metrics as a promising tool for accessible and scalable balance screening in diabetes care.</p>
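	<p>For readers implementing a similar pipeline, the following is a minimal Python sketch of the two sway metrics named above (RMS and movement velocity) computed from a lower-back acceleration trace. The detrending step and the jerk-based MV definition are illustrative assumptions, not the authors' exact preprocessing.</p>
	<pre><code>import numpy as np

def postural_sway_metrics(acc, fs):
    """RMS amplitude and movement velocity of a trunk-acceleration trace.

    acc: (n_samples, n_axes) calibrated accelerations; fs: sampling rate (Hz).
    The detrending and the MV definition below are illustrative assumptions.
    """
    acc = acc - acc.mean(axis=0)                # remove gravity/DC offset per axis
    rms_acc = np.sqrt((acc ** 2).mean(axis=0))  # RMS-ACC per axis
    # MV-ACC: mean absolute rate of change of acceleration (a jerk-based proxy)
    mv_acc = np.abs(np.diff(acc, axis=0)).mean(axis=0) * fs
    return rms_acc, mv_acc
</code></pre>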
	]]></content:encoded>

	<dc:title>An Exploratory Analysis of Postural Control in People with Type 2 Diabetes Mellitus Using a Smartphone IMU Sensor</dc:title>
			<dc:creator>Trine Rolighed Thomsen</dc:creator>
			<dc:creator>Sophia Pölhöšová</dc:creator>
			<dc:creator>Asger Ahlmann Bech</dc:creator>
			<dc:creator>Aksayan Arunanthy Mahalingasivam</dc:creator>
			<dc:creator>Nicklas Højgaard-Hessellund Rasmussen</dc:creator>
			<dc:creator>Anderson Souza Oliveira</dc:creator>
		<dc:identifier>doi: 10.3390/s26092899</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2899</prism:startingPage>
		<prism:doi>10.3390/s26092899</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2899</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2898">

	<title>Sensors, Vol. 26, Pages 2898: Wavelet-Based Health Monitoring Approach for Train Door Actuation Using Motor Current Analysis</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2898</link>
	<description>Train door actuation systems are critical safety components in railway vehicles, where early fault detection is essential for safe operation and reduced service disruptions. Conventional monitoring approaches often rely on additional sensors such as infrared detectors or vision systems, which increase system complexity and cost. To overcome these limitations, this study proposes a wavelet-based health monitoring structure for detecting electrical and mechanical faults using motor current signal analysis. A dynamic model of the train door actuation mechanism, including a DC motor, gearbox, and lead screw, was developed in MATLAB/Simulink to simulate conditions such as armature electrical faults, brush wear, increased friction, and lead screw misalignment. Motor current signals were analyzed using the Discrete Wavelet Transform with a Daubechies (db10) mother wavelet to extract diagnostic features based on the L1-norms of wavelet coefficients at levels W8 and W9 along with the motor starting current peak. Experimental validation using a LabVIEW-based test platform demonstrated fault detection accuracy above 96% with a response time below 0.3 s, confirming the effectiveness of the proposed approach for predictive maintenance of railway door systems.</description>
	<pubDate>2026-05-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2898: Wavelet-Based Health Monitoring Approach for Train Door Actuation Using Motor Current Analysis</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2898">doi: 10.3390/s26092898</a></p>
	<p>Authors:
		Yaojung Shiao,
		Premkumar Gadde,
		Manichandra Bollepelly
		</p>
	<p>Train door actuation systems are critical safety components in railway vehicles, where early fault detection is essential for safe operation and reduced service disruptions. Conventional monitoring approaches often rely on additional sensors such as infrared detectors or vision systems, which increase system complexity and cost. To overcome these limitations, this study proposes a wavelet-based health monitoring structure for detecting electrical and mechanical faults using motor current signal analysis. A dynamic model of the train door actuation mechanism, including a DC motor, gearbox, and lead screw, was developed in MATLAB/Simulink to simulate conditions such as armature electrical faults, brush wear, increased friction, and lead screw misalignment. Motor current signals were analyzed using the Discrete Wavelet Transform with a Daubechies (db10) mother wavelet to extract diagnostic features based on the L1-norms of wavelet coefficients at levels W8 and W9 along with the motor starting current peak. Experimental validation using a LabVIEW-based test platform demonstrated fault detection accuracy above 96% with a response time below 0.3 s, confirming the effectiveness of the proposed approach for predictive maintenance of railway door systems.</p>
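	<p>The feature extraction described above maps directly onto a standard wavelet toolkit. Below is a minimal sketch using PyWavelets; the start-window length for the peak and the function name are assumptions for illustration.</p>
	<pre><code>import numpy as np
import pywt  # PyWavelets

def door_current_features(current, start_samples=500):
    """Diagnostic features from a motor-current record: L1-norms of the
    db10 detail coefficients at levels 8 and 9 (W8, W9) plus the
    starting-current peak. The start window length is an assumption."""
    coeffs = pywt.wavedec(current, "db10", level=9)
    # coeffs = [cA9, cD9, cD8, ..., cD1]
    l1_w9 = np.abs(coeffs[1]).sum()   # level-9 detail band (W9)
    l1_w8 = np.abs(coeffs[2]).sum()   # level-8 detail band (W8)
    start_peak = np.max(np.abs(current[:start_samples]))
    return np.array([l1_w9, l1_w8, start_peak])
</code></pre>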
	]]></content:encoded>

	<dc:title>Wavelet-Based Health Monitoring Approach for Train Door Actuation Using Motor Current Analysis</dc:title>
			<dc:creator>Yaojung Shiao</dc:creator>
			<dc:creator>Premkumar Gadde</dc:creator>
			<dc:creator>Manichandra Bollepelly</dc:creator>
		<dc:identifier>doi: 10.3390/s26092898</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-06</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-06</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2898</prism:startingPage>
		<prism:doi>10.3390/s26092898</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2898</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2897">

	<title>Sensors, Vol. 26, Pages 2897: A General Finite Beam on Tensionless Foundation Model for Rail Track Characterization and Evaluation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2897</link>
	<description>Rail infrastructure plays an important role in freight and passenger mobility, and the assessment of rail track structure depends critically on understanding how the rail interacts with the supporting foundation. When rail support degrades (e.g., due to ballast fouling, settlement, etc.), the rail exhibits greater localized deformation that can lead to serious deleterious conditions. Track modulus represents a fundamental diagnostic measure of rail support, encompassing the vertical stiffness characteristics of the foundation and its resistance against downward rail movement. Existing track modulus characterization methodologies typically comprise deflection measurements of railway track (e.g., tie deflections) under known loads. Track modulus estimations result from analyzing deflection and load under assumptions of a traditional Winkler foundation, which can oversimplify mechanistic relationships. Specifically, in the context of rail&amp;ndash;ballast&amp;ndash;subgrade interaction, a tensionless foundation permits gaps to develop as the track structure separates from the supporting ballast; additionally, track modulus may vary along the track length as conditions vary spatially. This paper presents a general analytical solution for ballasted track support characterization based on an iterative algorithm for the static response of a finite beam resting on a tensionless Winkler foundation. The method relates multiple loads (e.g., concentrated axle loads and distributed self-weight), deflection along the track, and track condition through singularity functions, superposition of discrete support springs, and moment&amp;ndash;curvature relationships. The model estimates rail deflections, lift-off points, and shear and moment diagrams along the track. The technique permits: (1) validations against benchmark solutions and previously published results, (2) estimations of track modulus from known loads and measured deflections, and ultimately, (3) a framework for designing and processing sensor data streams for use in analyses and evaluations of railway track structure.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2897: A General Finite Beam on Tensionless Foundation Model for Rail Track Characterization and Evaluation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2897">doi: 10.3390/s26092897</a></p>
	<p>Authors:
		Hamoud H. Alshallaqi,
		Brett A. Story
		</p>
	<p>Rail infrastructure plays an important role in freight and passenger mobility, and the assessment of rail track structure depends critically on understanding how the rail interacts with the supporting foundation. When rail support degrades (e.g., due to ballast fouling, settlement, etc.), the rail exhibits greater localized deformation that can lead to serious deleterious conditions. Track modulus represents a fundamental diagnostic measure of rail support, encompassing the vertical stiffness characteristics of the foundation and its resistance against downward rail movement. Existing track modulus characterization methodologies typically comprise deflection measurements of railway track (e.g., tie deflections) under known loads. Track modulus estimations result from analyzing deflection and load under assumptions of a traditional Winkler foundation, which can oversimplify mechanistic relationships. Specifically, in the context of rail&ndash;ballast&ndash;subgrade interaction, a tensionless foundation permits gaps to develop as the track structure separates from the supporting ballast; additionally, track modulus may vary along the track length as conditions vary spatially. This paper presents a general analytical solution for ballasted track support characterization based on an iterative algorithm for the static response of a finite beam resting on a tensionless Winkler foundation. The method relates multiple loads (e.g., concentrated axle loads and distributed self-weight), deflection along the track, and track condition through singularity functions, superposition of discrete support springs, and moment&ndash;curvature relationships. The model estimates rail deflections, lift-off points, and shear and moment diagrams along the track. The technique permits: (1) validations against benchmark solutions and previously published results, (2) estimations of track modulus from known loads and measured deflections, and ultimately, (3) a framework for designing and processing sensor data streams for use in analyses and evaluations of railway track structure.</p>
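	<p>To make the iterative tensionless-contact idea concrete, here is a compact finite-difference sketch: foundation springs act only where the beam presses into the ballast, lifted-off springs are removed, and the system is re-solved until the contact set stabilizes. It illustrates the contact iteration only, not the paper's singularity-function formulation; the sign convention (deflection positive downward) and free-free boundary conditions are assumptions.</p>
	<pre><code>import numpy as np

def tensionless_winkler(EI, k, q, L, n=401, iters=100):
    """Static deflection w (positive downward) of a free-free beam on a
    tensionless Winkler foundation under a uniform load q per unit length."""
    h = L / (n - 1)
    rhs = np.zeros(n)
    active = np.ones(n, dtype=bool)       # initial guess: full contact
    for _ in range(iters):
        A = np.zeros((n, n))
        for i in range(2, n - 2):         # interior rows: EI w'''' + k w = q
            A[i, i-2:i+3] = EI / h**4 * np.array([1, -4, 6, -4, 1])
            if active[i]:
                A[i, i] += k
            rhs[i] = q
        A[0, 0:3] = [1, -2, 1]            # zero moment at left end
        A[1, 0:4] = [-1, 3, -3, 1]        # zero shear at left end
        A[n-1, n-3:] = [1, -2, 1]         # zero moment at right end
        A[n-2, n-4:] = [-1, 3, -3, 1]     # zero shear at right end
        w = np.linalg.solve(A, rhs)
        contact = w > 0.0                 # springs resist compression only
        if np.array_equal(contact, active):
            break                         # contact set has converged
        active = contact
    return w
</code></pre>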
	]]></content:encoded>

	<dc:title>A General Finite Beam on Tensionless Foundation Model for Rail Track Characterization and Evaluation</dc:title>
			<dc:creator>Hamoud H. Alshallaqi</dc:creator>
			<dc:creator>Brett A. Story</dc:creator>
		<dc:identifier>doi: 10.3390/s26092897</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2897</prism:startingPage>
		<prism:doi>10.3390/s26092897</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2897</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2896">

	<title>Sensors, Vol. 26, Pages 2896: Fingerprint Recognition Based on Molecular-Scale Conductance Response via Electrochemically Gated Quantum Tunnelling</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2896</link>
	<description>Molecular-scale detection based on quantum tunnelling is promising for molecular electronics and high-sensitivity analysis, owing to its sensitivity to molecular structure and energy levels. However, conventional two-electrode tunnelling measurements suffer from overlapping conductivity of different molecules, limiting molecular discrimination in complex systems. To address this, we propose an electrochemical-gate-controlled nanoscale tunnelling strategy that expands the two-electrode system to a three-electrode configuration via a tunable gate potential, enabling the differentiation of distinct molecules at near-single-molecule sensitivity. Scanning the gate potential under constant tunnelling bias modulates the alignment between molecular orbitals and the electrode Fermi level, altering the statistical characteristics of molecular tunnelling transport. Experimental results show that target molecules induce a bimodal distribution of tunnelling current (background and molecule-correlated channels), with the second peak exhibiting distinct gate potential dependence. Comparative analysis of ascorbic acid (AA), acetylcholine (ACh), and uric acid (UA) reveals unique trajectories of characteristic peaks with gate potential, forming an electrochemical gate response fingerprint. This gate-dependent conductance trajectory provides a novel statistical dimension for molecular recognition, enabling differentiation of distinct molecules.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2896: Fingerprint Recognition Based on Molecular-Scale Conductance Response via Electrochemically Gated Quantum Tunnelling</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2896">doi: 10.3390/s26092896</a></p>
	<p>Authors:
		Zifan Wang,
		Long Yi,
		Ga Zhang,
		Xufei Ma,
		Ye Tian,
		Bintian Zhang,
		Xu Liu,
		Longhua Tang
		</p>
	<p>Molecular-scale detection based on quantum tunnelling is promising for molecular electronics and high-sensitivity analysis, owing to its sensitivity to molecular structure and energy levels. However, conventional two-electrode tunnelling measurements suffer from overlapping conductivity of different molecules, limiting molecular discrimination in complex systems. To address this, we propose an electrochemical-gate-controlled nanoscale tunnelling strategy that expands the two-electrode system to a three-electrode configuration via a tunable gate potential, enabling the differentiation of distinct molecules at near-single-molecule sensitivity. Scanning the gate potential under constant tunnelling bias modulates the alignment between molecular orbitals and the electrode Fermi level, altering the statistical characteristics of molecular tunnelling transport. Experimental results show that target molecules induce a bimodal distribution of tunnelling current (background and molecule-correlated channels), with the second peak exhibiting distinct gate potential dependence. Comparative analysis of ascorbic acid (AA), acetylcholine (ACh), and uric acid (UA) reveals unique trajectories of characteristic peaks with gate potential, forming an electrochemical gate response fingerprint. This gate-dependent conductance trajectory provides a novel statistical dimension for molecular recognition, enabling differentiation of distinct molecules.</p>
	]]></content:encoded>

	<dc:title>Fingerprint Recognition Based on Molecular-Scale Conductance Response via Electrochemically Gated Quantum Tunnelling</dc:title>
			<dc:creator>Zifan Wang</dc:creator>
			<dc:creator>Long Yi</dc:creator>
			<dc:creator>Ga Zhang</dc:creator>
			<dc:creator>Xufei Ma</dc:creator>
			<dc:creator>Ye Tian</dc:creator>
			<dc:creator>Bintian Zhang</dc:creator>
			<dc:creator>Xu Liu</dc:creator>
			<dc:creator>Longhua Tang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092896</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2896</prism:startingPage>
		<prism:doi>10.3390/s26092896</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2896</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2895">

	<title>Sensors, Vol. 26, Pages 2895: Physics-Enhanced Orthogonal Sensing for Self-Supervised Anomaly Detection in Rolling Mills</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2895</link>
	<description>The rolling mill guiding system is a key component that affects the quality of steel products. However, due to the harsh on-site environment, there is usually a lack of effective online monitoring and early warning mechanisms. Moreover, in industrial environments, fault samples are very scarce, making supervised artificial intelligence methods difficult to apply. This paper proposes a &amp;ldquo;physics-enhanced&amp;rdquo; orthogonal-sensing cyber-physical architecture that integrates hardware and software design. At the hardware level, an embedded orthogonal sensing layout (P&amp;perp;V) is designed to decouple drive-chain vibration from rolling-force fluctuations at the transducer level. At the algorithm level, the state monitoring of the guiding system is formulated as a self-supervised anomaly detection problem, and a two-branch network architecture is designed: one branch uses the CSD transformer to capture physical coupling characteristics, while the other branch uses VQ-VAE to extract operating-condition context. Experimental results on a dataset comprising real operational data and expert-validated synthetic fault scenarios show that the system achieves an AUC-ROC of 0.952 and a false alarm rate of 0.048 under a 95% TPR, with an end-to-end processing latency of approximately 8 ms per window and a system-level fault response time of approximately 108 ms, and thus meets the requirements of real-time industrial monitoring.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2895: Physics-Enhanced Orthogonal Sensing for Self-Supervised Anomaly Detection in Rolling Mills</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2895">doi: 10.3390/s26092895</a></p>
	<p>Authors:
		Yifan Wang,
		Bin Zheng,
		Yehan Feng,
		Xiong Chen
		</p>
	<p>The rolling mill guiding system is a key component that affects the quality of steel products. However, due to the harsh on-site environment, there is usually a lack of effective online monitoring and early warning mechanisms. Moreover, in industrial environments, fault samples are very scarce, making supervised artificial intelligence methods difficult to apply. This paper proposes a &ldquo;physics-enhanced&rdquo; orthogonal-sensing cyber-physical architecture that integrates hardware and software design. At the hardware level, an embedded orthogonal sensing layout (P&perp;V) is designed to decouple drive-chain vibration from rolling-force fluctuations at the transducer level. At the algorithm level, the state monitoring of the guiding system is formulated as a self-supervised anomaly detection problem, and a two-branch network architecture is designed: one branch uses the CSD transformer to capture physical coupling characteristics, while the other branch uses VQ-VAE to extract operating-condition context. Experimental results on a dataset comprising real operational data and expert-validated synthetic fault scenarios show that the system achieves an AUC-ROC of 0.952 and a false alarm rate of 0.048 under a 95% TPR, with an end-to-end processing latency of approximately 8 ms per window and a system-level fault response time of approximately 108 ms, and thus meets the requirements of real-time industrial monitoring.</p>
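	<p>A minimal sketch of how the two branch outputs could be combined into one anomaly score is shown below. The z-normalization against healthy-data statistics and the equal branch weighting are assumptions for illustration, not the paper's learned fusion.</p>
	<pre><code>import numpy as np

def fused_anomaly_score(phys_residual, ctx_residual, stats):
    """Fuse the two branch residuals (physical-coupling branch and
    operating-condition context branch) into one anomaly score (sketch).

    Residuals are z-normalized with statistics estimated on healthy
    training windows; the equal weighting is an assumption."""
    z_phys = (phys_residual - stats["phys_mu"]) / stats["phys_sd"]
    z_ctx = (ctx_residual - stats["ctx_mu"]) / stats["ctx_sd"]
    return 0.5 * z_phys + 0.5 * z_ctx

# A window is flagged when the fused score crosses a threshold tuned on
# validation data, e.g., for a 95% true-positive rate.
</code></pre>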
	]]></content:encoded>

	<dc:title>Physics-Enhanced Orthogonal Sensing for Self-Supervised Anomaly Detection in Rolling Mills</dc:title>
			<dc:creator>Yifan Wang</dc:creator>
			<dc:creator>Bin Zheng</dc:creator>
			<dc:creator>Yehan Feng</dc:creator>
			<dc:creator>Xiong Chen</dc:creator>
		<dc:identifier>doi: 10.3390/s26092895</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2895</prism:startingPage>
		<prism:doi>10.3390/s26092895</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2895</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2894">

	<title>Sensors, Vol. 26, Pages 2894: A Four-Wavelength Flow-Through Fluorescence&amp;ndash;Scatterometric Sensor That Allows for Real-Time Determination of Fat and Protein Content in Milk&amp;ndash;Air Mixtures with High Accuracy</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2894</link>
	<description>(1) Background: Currently, there is a problem of prompt determination of fat and protein content in the milk&amp;ndash;air mixture of milking machines. (2) Methods: A design of a sensor prototype is proposed, combining measurements of light scattering (scatterometry) and fluorescence (fluorometry) to determine the component composition of the milk&amp;ndash;air mixture formed during milking. (3) Results: An optical and electronic circuit of a flow sensor has been developed, using four sources of optical radiation: blue, green and red semiconductor lasers (light scattering in milk) and a UV LED (milk fluorescence), as well as an axial photodiode array for recording the light scattering indicatrix and the fluorescence intensity of the milk&amp;ndash;air mixture. The use of three laser sources in the scatterometric circuit allows for the determination of the fat content in milk with an error of 0.05%, which is better than all currently known analogs. The developed sensor enables the detection of counterfeit milk containing palm oil instead of milk fat. It operates reliably in a temperature range of 5&amp;ndash;35 &amp;deg;C and at milk flow rates of up to 100 mL/s. (4) Conclusions: The sensor is capable of transmitting real-time data on the fat and protein content of milk to an RS-232 serial port, enabling integration into milking robots and automated milking systems.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2894: A Four-Wavelength Flow-Through Fluorescence&amp;ndash;Scatterometric Sensor That Allows for Real-Time Determination of Fat and Protein Content in Milk&amp;ndash;Air Mixtures with High Accuracy</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2894">doi: 10.3390/s26092894</a></p>
	<p>Authors:
		Maxim E. Astashev,
		Dmitry N. Ignatenko,
		Elena A. Molkova,
		Ivan M. Gogolev,
		Andrey V. Onegov,
		Sergey Y. Smolentsev,
		Artem R. Khakimov,
		Semen S. Ruzin,
		Dmitry A. Budnikov,
		Dmitriy Yu. Pavkin,
		Sergey V. Gudkov
		</p>
	<p>(1) Background: Currently, there is a problem of prompt determination of fat and protein content in the milk&ndash;air mixture of milking machines. (2) Methods: A design of a sensor prototype is proposed, combining measurements of light scattering (scatterometry) and fluorescence (fluorometry) to determine the component composition of the milk&ndash;air mixture formed during milking. (3) Results: An optical and electronic circuit of a flow sensor has been developed, using four sources of optical radiation: blue, green and red semiconductor lasers (light scattering in milk) and a UV LED (milk fluorescence), as well as an axial photodiode array for recording the light scattering indicatrix and the fluorescence intensity of the milk&ndash;air mixture. The use of three laser sources in the scatterometric circuit allows for the determination of the fat content in milk with an error of 0.05%, which is better than all currently known analogs. The developed sensor enables the detection of counterfeit milk containing palm oil instead of milk fat. It operates reliably in a temperature range of 5&ndash;35 &deg;C and at milk flow rates of up to 100 mL/s. (4) Conclusions: The sensor is capable of transmitting real-time data on the fat and protein content of milk to an RS-232 serial port, enabling integration into milking robots and automated milking systems.</p>
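	<p>On the software side, a measurement like this reduces to a calibration problem: mapping per-wavelength optical features to reference fat and protein values. A least-squares sketch under that assumption follows; the sensor's actual inversion procedure is not spelled out in the abstract, and all names here are illustrative.</p>
	<pre><code>import numpy as np

def fit_calibration(features, reference):
    """Least-squares calibration from optical features to composition.

    features: (n_samples, n_features), e.g., indicatrix descriptors for the
    blue/green/red lasers plus UV fluorescence intensity (assumed inputs).
    reference: (n_samples, 2) lab-measured fat and protein content (%).
    Illustrative only; the sensor's actual inversion is not published here.
    """
    X = np.column_stack([features, np.ones(len(features))])  # intercept term
    coef, *_ = np.linalg.lstsq(X, reference, rcond=None)
    return coef

def predict_composition(features, coef):
    X = np.column_stack([features, np.ones(len(features))])
    return X @ coef  # columns: fat %, protein %
</code></pre>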
	]]></content:encoded>

	<dc:title>A Four-Wavelength Flow-Through Fluorescence&amp;ndash;Scatterometric Sensor That Allows for Real-Time Determination of Fat and Protein Content in Milk&amp;ndash;Air Mixtures with High Accuracy</dc:title>
			<dc:creator>Maxim E. Astashev</dc:creator>
			<dc:creator>Dmitry N. Ignatenko</dc:creator>
			<dc:creator>Elena A. Molkova</dc:creator>
			<dc:creator>Ivan M. Gogolev</dc:creator>
			<dc:creator>Andrey V. Onegov</dc:creator>
			<dc:creator>Sergey Y. Smolentsev</dc:creator>
			<dc:creator>Artem R. Khakimov</dc:creator>
			<dc:creator>Semen S. Ruzin</dc:creator>
			<dc:creator>Dmitry A. Budnikov</dc:creator>
			<dc:creator>Dmitriy Yu. Pavkin</dc:creator>
			<dc:creator>Sergey V. Gudkov</dc:creator>
		<dc:identifier>doi: 10.3390/s26092894</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2894</prism:startingPage>
		<prism:doi>10.3390/s26092894</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2894</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2893">

	<title>Sensors, Vol. 26, Pages 2893: Automated Ergonomic Risk Assessment of Wheelchair Users During Cabinet Interaction Using Vision-Based 3D Pose Estimation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2893</link>
	<description>Advanced sensor signal analysis is increasingly important for intelligent health management in human-centered environments, where continuous perception and real-time interpretation of motion-related signals are essential for safe and adaptive assistance. In this study, we propose a vision-based sensor signal analysis framework for automated ergonomic risk assessment of wheelchair users during cabinet interaction. The proposed framework integrates YOLOv11 for human detection, MHFormer for monocular 3D pose reconstruction, and a fuzzy logic-enhanced RULA model for continuous ergonomic risk quantification from video-derived motion signals. To support model development and evaluation, we constructed a dedicated wheelchair cabinet-operation dataset comprising 30 participants, including 14 experienced wheelchair users and 16 trained simulation participants, across five representative cabinet-operation scenarios. The raw dataset contained approximately 5 h of RGB video and about 150,000 original frames. To reduce redundancy caused by highly similar consecutive frames and to mitigate overfitting risk, representative frames were sampled from the continuous video sequences, resulting in 10,000 images for annotation and model development. Based on the proposed framework, raw visual sensor signals are transformed into temporally continuous kinematic representations and ergonomic risk scores, enabling non-contact and real-time health-state interpretation in assistive living environments. The proposed method achieved an average joint-angle estimation RMSE of 7.5&amp;deg;, representing an approximately 60% reduction compared with a Kinect v2-based motion capture baseline (18.6&amp;deg;), which is widely used for low-cost ergonomic evaluation. In benchmark evaluation, the proposed method achieved 84% risk-classification accuracy with a Cohen&amp;rsquo;s kappa of 0.66, outperforming representative baseline approaches. The results further indicated that low revolving-door and low-drawer operations were associated with higher and more sustained ergonomic risk exposure than sliding-door interaction. These findings demonstrate that vision-based sensor signal analysis can provide an effective solution for intelligent health management, ergonomic monitoring, and perception-driven assessment in accessible and assistive autonomous living systems.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2893: Automated Ergonomic Risk Assessment of Wheelchair Users During Cabinet Interaction Using Vision-Based 3D Pose Estimation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2893">doi: 10.3390/s26092893</a></p>
	<p>Authors:
		Yilin Xu,
		Ziqian Yang,
		Tao Sun,
		Jiachuan Ning
		</p>
	<p>Advanced sensor signal analysis is increasingly important for intelligent health management in human-centered environments, where continuous perception and real-time interpretation of motion-related signals are essential for safe and adaptive assistance. In this study, we propose a vision-based sensor signal analysis framework for automated ergonomic risk assessment of wheelchair users during cabinet interaction. The proposed framework integrates YOLOv11 for human detection, MHFormer for monocular 3D pose reconstruction, and a fuzzy logic-enhanced RULA model for continuous ergonomic risk quantification from video-derived motion signals. To support model development and evaluation, we constructed a dedicated wheelchair cabinet-operation dataset comprising 30 participants, including 14 experienced wheelchair users and 16 trained simulation participants, across five representative cabinet-operation scenarios. The raw dataset contained approximately 5 h of RGB video and about 150,000 original frames. To reduce redundancy caused by highly similar consecutive frames and to mitigate overfitting risk, representative frames were sampled from the continuous video sequences, resulting in 10,000 images for annotation and model development. Based on the proposed framework, raw visual sensor signals are transformed into temporally continuous kinematic representations and ergonomic risk scores, enabling non-contact and real-time health-state interpretation in assistive living environments. The proposed method achieved an average joint-angle estimation RMSE of 7.5&deg;, representing an approximately 60% reduction compared with a Kinect v2-based motion capture baseline (18.6&deg;), which is widely used for low-cost ergonomic evaluation. In benchmark evaluation, the proposed method achieved 84% risk-classification accuracy with a Cohen&rsquo;s kappa of 0.66, outperforming representative baseline approaches. The results further indicated that low revolving-door and low-drawer operations were associated with higher and more sustained ergonomic risk exposure than sliding-door interaction. These findings demonstrate that vision-based sensor signal analysis can provide an effective solution for intelligent health management, ergonomic monitoring, and perception-driven assessment in accessible and assistive autonomous living systems.</p>
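	<p>To illustrate why a fuzzy logic-enhanced RULA score helps with noisy video-derived joint angles, here is a minimal sketch for the upper-arm component. The breakpoints follow the standard RULA table; the linear-ramp smoothing and its width are assumptions, not the authors' membership functions.</p>
	<pre><code>import numpy as np

def fuzzy_upper_arm_score(angle_deg, band=10.0):
    """Fuzzy-smoothed RULA-style upper-arm score (illustrative).

    Crisp RULA steps the score at 20, 45, and 90 degrees of flexion; a
    linear ramp of width `band` around each breakpoint keeps pose-estimation
    noise near a threshold from flipping the score."""
    score = 1.0
    for b in (20.0, 45.0, 90.0):
        # each term rises linearly from 0 to 1 across [b - band/2, b + band/2]
        score += float(np.clip((angle_deg - (b - band / 2.0)) / band, 0.0, 1.0))
    return score  # continuous value in [1.0, 4.0]
</code></pre>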
	]]></content:encoded>

	<dc:title>Automated Ergonomic Risk Assessment of Wheelchair Users During Cabinet Interaction Using Vision-Based 3D Pose Estimation</dc:title>
			<dc:creator>Yilin Xu</dc:creator>
			<dc:creator>Ziqian Yang</dc:creator>
			<dc:creator>Tao Sun</dc:creator>
			<dc:creator>Jiachuan Ning</dc:creator>
		<dc:identifier>doi: 10.3390/s26092893</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2893</prism:startingPage>
		<prism:doi>10.3390/s26092893</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2893</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2892">

	<title>Sensors, Vol. 26, Pages 2892: Standalone RFID Access Control System with Data-Integrity Verification Capabilities</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2892</link>
	<description>Today, access control systems are used in almost every institution and building because they provide an effective, high level of security. Many commercially available systems offer security-related access features for buildings, including biometric options; most use a centralized architecture in which each building can be remotely controlled via an Internet connection. This paper presents a markedly different system: a decentralized access control system with clone-detection and data-integrity verification mechanisms. The overall architecture includes hardware encoding of the access system&amp;rsquo;s location, and access is granted based on information written to the RFID card by the card-issuing center. This allows the system to be easily reconfigured at the hardware level prior to installation in the access area. The proposed system employs a confidential RFID card data-integrity algorithm that derives a checksum from the card data and the immutable UID in order to validate the card contents. As a result, any unwanted modification of even a single bit invalidates the card and blocks access to the building. The system was implemented, validated, and extensively tested over a one-year period with no reported operational issues.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2892: Standalone RFID Access Control System with Data-Integrity Verification Capabilities</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2892">doi: 10.3390/s26092892</a></p>
	<p>Authors:
		Valentin Popa,
		Adrian I. Petrariu,
		Partemie M. Mutescu,
		Alexandru A. Maftei,
		Alexandru Lavric
		</p>
	<p>Today, access control systems are used in almost every institution and building because they provide an effective, high level of security. Many commercially available systems offer security-related access features for buildings, including biometric options; most use a centralized architecture in which each building can be remotely controlled via an Internet connection. This paper presents a markedly different system: a decentralized access control system with clone-detection and data-integrity verification mechanisms. The overall architecture includes hardware encoding of the access system&rsquo;s location, and access is granted based on information written to the RFID card by the card-issuing center. This allows the system to be easily reconfigured at the hardware level prior to installation in the access area. The proposed system employs a confidential RFID card data-integrity algorithm that derives a checksum from the card data and the immutable UID in order to validate the card contents. As a result, any unwanted modification of even a single bit invalidates the card and blocks access to the building. The system was implemented, validated, and extensively tested over a one-year period with no reported operational issues.</p>
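	<p>The checksum-binding idea generalizes cleanly. The paper's algorithm is confidential, so the sketch below uses HMAC-SHA256 as a stand-in to show how tying the writable data to the immutable UID defeats both bit tampering and cloning; all names are illustrative.</p>
	<pre><code>import hmac
import hashlib

def card_checksum(card_data: bytes, uid: bytes, key: bytes) -> bytes:
    """Keyed checksum binding the writable card data to the immutable UID.
    The paper's algorithm is confidential; HMAC-SHA256 is a stand-in."""
    return hmac.new(key, uid + card_data, hashlib.sha256).digest()[:8]

def card_is_valid(card_data: bytes, uid: bytes,
                  stored_checksum: bytes, key: bytes) -> bool:
    # A single flipped bit in the data, or a clone carrying a different
    # UID, changes the checksum and the card is rejected.
    expected = card_checksum(card_data, uid, key)
    return hmac.compare_digest(expected, stored_checksum)
</code></pre>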
	]]></content:encoded>

	<dc:title>Standalone RFID Access Control System with Data-Integrity Verification Capabilities</dc:title>
			<dc:creator>Valentin Popa</dc:creator>
			<dc:creator>Adrian I. Petrariu</dc:creator>
			<dc:creator>Partemie M. Mutescu</dc:creator>
			<dc:creator>Alexandru A. Maftei</dc:creator>
			<dc:creator>Alexandru Lavric</dc:creator>
		<dc:identifier>doi: 10.3390/s26092892</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2892</prism:startingPage>
		<prism:doi>10.3390/s26092892</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2892</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2891">

	<title>Sensors, Vol. 26, Pages 2891: A Real-Time SDR-Based Vehicular Scatterometer with Multi-Subband Coherent Synthesis</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2891</link>
	<description>Ground-based scatterometers are widely used for quantitative microwave backscattering measurements in soil moisture retrieval, vegetation monitoring, and satellite scatterometer validation. However, low-cost software-defined radio (SDR) transceivers provide limited instantaneous bandwidth, making it difficult to transmit and process signals with bandwidths on the order of hundreds of MHz for fine range resolution, especially for systems requiring real-time onboard processing. To address this problem, this paper presents a vehicular, fully polarimetric, SDR-based scatterometer that achieves an equivalent wideband response by sequentially transmitting adjacent narrow subbands and coherently synthesizing them onboard. To enable real-time operation on a resource-limited field-programmable gate array/system-on-chip (FPGA/SoC) platform, we adopt a frequency-domain synthesis-pulse-compression pipeline that avoids interpolation and eliminates repeated matched filtering across subbands. A slot-based online phase calibration is performed within the settling window after each fast lock to estimate and compensate random local oscillator (LO) phase offsets, preserving coherent stitching. In addition, pulse repetition within each subband and coherent accumulation are integrated to improve the signal-to-noise ratio (SNR) under real-time throughput constraints. A Zynq-based implementation demonstrates deterministic onboard range-profile output, with a minimum processing latency of about 1.57 ms per frame. Loopback and outdoor experiments validate the equivalent 200 MHz bandwidth (five 40 MHz subbands), achieving approximately 0.75 m resolution and yielding sidelobe metrics consistent with the designed windowing, including a peak sidelobe ratio (PSLR) of &amp;minus;27.43 dB and an integrated sidelobe ratio (ISLR) of &amp;minus;12.38 dB. Field scans over farmland further show consistent &amp;sigma;0 trends across incidence angle and azimuth, indicating reliable onboard quantitative backscattering measurement. These results demonstrate that the proposed method provides a feasible solution for deterministic real-time equivalent wideband scatterometry on a low-cost SDR platform.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2891: A Real-Time SDR-Based Vehicular Scatterometer with Multi-Subband Coherent Synthesis</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2891">doi: 10.3390/s26092891</a></p>
	<p>Authors:
		Shijie Yang,
		Wei Guo,
		Caiyun Wang,
		Peng Liu,
		Te Wang,
		Zhenzhen Liang,
		Qing Xing,
		Xingming Zheng,
		Bingze Li
		</p>
	<p>Ground-based scatterometers are widely used for quantitative microwave backscattering measurements in soil moisture retrieval, vegetation monitoring, and satellite scatterometer validation. However, low-cost software-defined radio (SDR) transceivers provide limited instantaneous bandwidth, making it difficult to transmit and process signals with bandwidths on the order of hundreds of MHz for fine range resolution, especially for systems requiring real-time onboard processing. To address this problem, this paper presents a vehicular, fully polarimetric, SDR-based scatterometer that achieves an equivalent wideband response by sequentially transmitting adjacent narrow subbands and coherently synthesizing them onboard. To enable real-time operation on a resource-limited field-programmable gate array/system-on-chip (FPGA/SoC) platform, we adopt a frequency-domain synthesis-pulse-compression pipeline that avoids interpolation and eliminates repeated matched filtering across subbands. A slot-based online phase calibration is performed within the settling window after each fast lock to estimate and compensate random local oscillator (LO) phase offsets, preserving coherent stitching. In addition, pulse repetition within each subband and coherent accumulation are integrated to improve the signal-to-noise ratio (SNR) under real-time throughput constraints. A Zynq-based implementation demonstrates deterministic onboard range-profile output, with a minimum processing latency of about 1.57 ms per frame. Loopback and outdoor experiments validate the equivalent 200 MHz bandwidth (five 40 MHz subbands), achieving approximately 0.75 m resolution and yielding sidelobe metrics consistent with the designed windowing, including a peak sidelobe ratio (PSLR) of &minus;27.43 dB and an integrated sidelobe ratio (ISLR) of &minus;12.38 dB. Field scans over farmland further show consistent &sigma;0 trends across incidence angle and azimuth, indicating reliable onboard quantitative backscattering measurement. These results demonstrate that the proposed method provides a feasible solution for deterministic real-time equivalent wideband scatterometry on a low-cost SDR platform.</p>
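	<p>The core of the multi-subband synthesis can be sketched in a few lines of numpy: correct each subband with its estimated LO phase, concatenate the spectra, and transform to a range profile. This is a simplified illustration under assumed naming and ordering, not the paper's FPGA pipeline.</p>
	<pre><code>import numpy as np

def synthesize_range_profile(subband_spectra, lo_phase_estimates):
    """Coherently stitch adjacent narrowband spectra into one equivalent
    wideband spectrum, then pulse-compress to a range profile (sketch).

    subband_spectra: list of equal-length complex spectra, lowest band first,
    assumed already matched-filtered and non-overlapping in frequency.
    lo_phase_estimates: per-band LO phase offsets (rad) from the calibration
    slot after each fast lock. Names and ordering are assumptions.
    """
    corrected = [s * np.exp(-1j * phi)
                 for s, phi in zip(subband_spectra, lo_phase_estimates)]
    wideband = np.concatenate(corrected)    # equivalent wideband spectrum
    profile = np.fft.ifft(wideband)         # frequency domain to range profile
    return np.abs(profile)
</code></pre>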
	]]></content:encoded>

	<dc:title>A Real-Time SDR-Based Vehicular Scatterometer with Multi-Subband Coherent Synthesis</dc:title>
			<dc:creator>Shijie Yang</dc:creator>
			<dc:creator>Wei Guo</dc:creator>
			<dc:creator>Caiyun Wang</dc:creator>
			<dc:creator>Peng Liu</dc:creator>
			<dc:creator>Te Wang</dc:creator>
			<dc:creator>Zhenzhen Liang</dc:creator>
			<dc:creator>Qing Xing</dc:creator>
			<dc:creator>Xingming Zheng</dc:creator>
			<dc:creator>Bingze Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092891</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2891</prism:startingPage>
		<prism:doi>10.3390/s26092891</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2891</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2890">

	<title>Sensors, Vol. 26, Pages 2890: Adaptive Dual Reinforcement Learning for Hybrid Spatial&amp;ndash;Temporal Networks in RIS-Assisted Indoor Localization (ADRL-HSTNet)</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2890</link>
	<description>Reconfigurable intelligent surface sensors (RISs) have emerged as a promising technology for enhancing wireless indoor localization by intelligently controlling signal propagation; however, extracting reliable localization fingerprints from RIS-assisted signals remains challenging due to multipath fading, environmental noise, and nonlinear spatial&amp;ndash;temporal channel dynamics. To address this, we propose an Adaptive Dual-Reinforcement Learning-Hybrid Spatial&amp;ndash;Temporal Network (ADRL-HSTNet) for RIS-assisted indoor localization. The framework utilizes dual-channel RSSI and phase measurements, followed by noise filtering, normalization, and sliding-window segmentation prior to feature extraction. It then constructs enhanced representations through handcrafted feature extraction and multi-branch processing, including patch-based features, wavelet-domain representations, statistical descriptors, and multi-level segmentation masks. These heterogeneous inputs are encoded using lightweight transformer-based encoders to capture multiscale dependencies. A first reinforcement learning selector adaptively weights the most informative feature branches to produce a fused representation, which is further processed by spatial and temporal transformer modules. Their outputs are adaptively combined via a second reinforcement learning selector to obtain a robust localization embedding. The model jointly performs classification, coordinate regression, and uncertainty estimation end-to-end. In experiments across multiple RIS configurations, the proposed ADRL-HSTNet outperformed the KAN, LSTM-KAN, and RHL-Net baselines, with the compared models achieving accuracies of 83.33%, 75.22%, 93.33%, and 88.89%, confirming the effectiveness of the proposed approach.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2890: Adaptive Dual Reinforcement Learning for Hybrid Spatial&ndash;Temporal Networks in RIS-Assisted Indoor Localization (ADRL-HSTNet)</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2890">doi: 10.3390/s26092890</a></p>
	<p>Authors:
		Mostafa Mohamed
		Ahmed Radi
		Shady Zahran
		</p>
	<p>Reconfigurable intelligent surface sensors (RISs) have emerged as a promising technology for enhancing wireless indoor localization by intelligently controlling signal propagation; however, extracting reliable localization fingerprints from RIS-assisted signals remains challenging due to multipath fading, environmental noise, and nonlinear spatial&ndash;temporal channel dynamics. To address this, we propose an Adaptive Dual-Reinforcement Learning-Hybrid Spatial&ndash;Temporal Network (ADRL-HSTNet) for RIS-assisted indoor localization. The framework utilizes dual-channel RSSI and phase measurements, followed by noise filtering, normalization, and sliding-window segmentation prior to feature extraction. It then constructs enhanced representations through handcrafted feature extraction and multi-branch processing, including patch-based features, wavelet-domain representations, statistical descriptors, and multi-level segmentation masks. These heterogeneous inputs are encoded using lightweight transformer-based encoders to capture multiscale dependencies. A first reinforcement learning selector adaptively weights the most informative feature branches to produce a fused representation, which is further processed by spatial and temporal transformer modules. Their outputs are adaptively combined via a second reinforcement learning selector to obtain a robust localization embedding. The model jointly performs classification, coordinate regression, and uncertainty estimation end-to-end. In experiments across multiple RIS configurations, the proposed ADRL-HSTNet outperformed the KAN, LSTM-KAN, and RHL-Net baselines, achieving accuracies of 83.33%, 75.22%, 93.33%, and 88.89% across the evaluated configurations, confirming the effectiveness of the proposed approach.</p>
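	<p>As a rough illustration of the branch-weighting idea, the sketch below implements a learned softmax gate over feature branches in PyTorch. It is a simplified stand-in: the paper's selectors are reinforcement-learning-based, and the branch names, dimensions, and gating head here are assumptions.</p>
	<pre><code>import torch
import torch.nn as nn

class BranchGate(nn.Module):
    """Softmax gate over feature branches: a toy stand-in for the paper's
    reinforcement-learning selector. A learned scoring head weights the
    branches; the RL policy itself is not reproduced."""
    def __init__(self, dim: int):
        super().__init__()
        self.score = nn.Linear(dim, 1)            # one scalar score per branch

    def forward(self, branches):                  # list of (batch, dim) tensors
        stacked = torch.stack(branches, dim=1)    # (batch, n_branches, dim)
        weights = torch.softmax(self.score(stacked).squeeze(-1), dim=1)
        fused = (weights.unsqueeze(-1) * stacked).sum(dim=1)
        return fused, weights

# Four hypothetical branches: patch, wavelet, statistical, segmentation-mask.
gate = BranchGate(dim=64)
fused, w = gate([torch.randn(8, 64) for _ in range(4)])
print(fused.shape, w.shape)   # torch.Size([8, 64]) torch.Size([8, 4])
</code></pre>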
	]]></content:encoded>

	<dc:title>Adaptive Dual Reinforcement Learning for Hybrid Spatial&amp;ndash;Temporal Networks in RIS-Assisted Indoor Localization (ADRL-HSTNet)</dc:title>
			<dc:creator>Mostafa Mohamed</dc:creator>
			<dc:creator>Ahmed Radi</dc:creator>
			<dc:creator>Shady Zahran</dc:creator>
		<dc:identifier>doi: 10.3390/s26092890</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2890</prism:startingPage>
		<prism:doi>10.3390/s26092890</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2890</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2889">

	<title>Sensors, Vol. 26, Pages 2889: SETJiP: Spatial and Extra Temporal Jigsaw Puzzles for Video Anomaly Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2889</link>
	<description>Video Anomaly Detection (VAD) is commonly formulated as a one-class classification task. Global motion, with temporal variations across most pixels within an object-centric region, e.g., walking, is typically regular, whereas localized motion, e.g., waving, can be ambiguous. Decoupled spatial and temporal jigsaw puzzles (DSTJiP) is a self-supervised method that learns discriminative representations by predicting the original order of spatially and temporally shuffled patches. However, DSTJiP&amp;rsquo;s uniform sampling and equal weighting do not assign stronger supervision to global-motion examples within the temporal objective. Consequently, the temporal supervision allocated to global-motion examples may become insufficient across training-data regimes with varying proportions of these examples, deteriorating VAD performance. Nevertheless, excessively strengthening such supervision also degrades performance. To address these issues, we propose spatial and extra temporal jigsaw puzzles (SETJiP) with two RGB-only training schemes that provide stronger and more conservative temporal supervision for global-motion examples, respectively. One scheme strengthens temporal supervision on these examples via additional temporal jigsaw puzzles. The other does so more conservatively by upweighting their temporal jigsaw puzzles. Experiments on four VAD benchmarks show that both schemes improve on DSTJiP and remain highly competitive with state-of-the-art methods.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2889: SETJiP: Spatial and Extra Temporal Jigsaw Puzzles for Video Anomaly Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2889">doi: 10.3390/s26092889</a></p>
	<p>Authors:
		Liheng Shen
		Tetsu Matsukawa
		Einoshin Suzuki
		</p>
	<p>Video Anomaly Detection (VAD) is commonly formulated as a one-class classification task. Global motion, with temporal variations across most pixels within an object-centric region, e.g., walking, is typically regular, whereas localized motion, e.g., waving, can be ambiguous. Decoupled spatial and temporal jigsaw puzzles (DSTJiP) is a self-supervised method that learns discriminative representations by predicting the original order of spatially and temporally shuffled patches. However, DSTJiP&rsquo;s uniform sampling and equal weighting do not assign stronger supervision to global-motion examples within the temporal objective. Consequently, the temporal supervision allocated to global-motion examples may become insufficient across training-data regimes with varying proportions of these examples, deteriorating VAD performance. Nevertheless, excessively strengthening such supervision also degrades performance. To address these issues, we propose spatial and extra temporal jigsaw puzzles (SETJiP) with two RGB-only training schemes that provide stronger and more conservative temporal supervision for global-motion examples, respectively. One scheme strengthens temporal supervision on these examples via additional temporal jigsaw puzzles. The other does so more conservatively by upweighting their temporal jigsaw puzzles. Experiments on four VAD benchmarks show that both schemes improve on DSTJiP and remain highly competitive with state-of-the-art methods.</p>
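	<p>To make the pretext task concrete, here is a minimal NumPy sketch of a temporal jigsaw sample and of loss upweighting for global-motion examples. The clip shape, the weight value, and the loss-weighting function are illustrative assumptions, not the paper's settings.</p>
	<pre><code>import numpy as np

def temporal_jigsaw(clip, rng):
    """Shuffle a clip along time and return (shuffled clip, permutation label).
    Minimal temporal-jigsaw pretext sample: `clip` is assumed (T, H, W, C),
    and predicting `perm` recovers the original frame order."""
    perm = rng.permutation(clip.shape[0])
    return clip[perm], perm

def weighted_puzzle_loss(losses, is_global_motion, w_global=2.0):
    """Upweight temporal-puzzle losses of global-motion examples: a toy
    stand-in for the conservative upweighting scheme. w_global is a
    hypothetical knob, not a value from the paper."""
    weights = np.where(is_global_motion, w_global, 1.0)
    return float((weights * losses).sum() / weights.sum())

rng = np.random.default_rng(0)
shuffled, label = temporal_jigsaw(rng.standard_normal((8, 32, 32, 3)), rng)
print(label)                                   # the order-prediction target
losses = np.array([0.7, 0.2, 0.9])
flags = np.array([True, False, True])
print(round(weighted_puzzle_loss(losses, flags), 3))   # 0.68
</code></pre>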
	]]></content:encoded>

	<dc:title>SETJiP: Spatial and Extra Temporal Jigsaw Puzzles for Video Anomaly Detection</dc:title>
			<dc:creator>Liheng Shen</dc:creator>
			<dc:creator>Tetsu Matsukawa</dc:creator>
			<dc:creator>Einoshin Suzuki</dc:creator>
		<dc:identifier>doi: 10.3390/s26092889</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2889</prism:startingPage>
		<prism:doi>10.3390/s26092889</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2889</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2888">

	<title>Sensors, Vol. 26, Pages 2888: Evaluating the Adversarial Robustness and Clinical Safety of Quantized Hierarchical Transformers for Edge-Based Malaria Microscopy</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2888</link>
	<description>Automated mobile microscopy in Internet of Things (IoT) networks is essential for scaling malaria screening in resource-constrained environments. Deploying standard convolutional architectures here introduces severe adversarial vulnerabilities. Post-Training Quantization (PTQ) mitigates hardware constraints by converting floating-point models to 8-bit integers (INT8); however, its impact on clinical safety and security remains unexplored. This study presents an adversarial audit of quantized Vision Transformers for medical edge deployment. We evaluated a Swin-Tiny transformer against ViT-Tiny and MobileNetV3 baselines using a 27,558-image malaria dataset and an out-of-distribution (OOD) White Blood Cell dataset. Our findings redefine the &amp;ldquo;Quantization Shield&amp;rdquo; hypothesis. PTQ compresses the Swin model by 3.9&amp;times; (to 27.89 MB) with a negligible 0.11% accuracy drop, maintaining statistical reliability on OOD tests. However, the hypothesized architectural resilience shatters under white-box Projected Gradient Descent (PGD) attacks. Despite robustness against single-step attacks, both MobileNetV3 and the INT8 Swin-Tiny collapse to 0.00% accuracy under iterative PGD. Conversely, the quantized Swin-Tiny resists black-box transfer attacks from a surrogate, maintaining 81.00% accuracy. We conclude that while quantized Vision Transformers meet mobile sensor constraints, integer quantization provides zero innate defense against targeted iterative perturbations, exposing a critical vulnerability in diagnostic IoT networks.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2888: Evaluating the Adversarial Robustness and Clinical Safety of Quantized Hierarchical Transformers for Edge-Based Malaria Microscopy</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2888">doi: 10.3390/s26092888</a></p>
	<p>Authors:
		Umar Hasan
		Turki G. Alghamdi
		Muhammad Ali Nayeem
		</p>
	<p>Automated mobile microscopy in Internet of Things (IoT) networks is essential for scaling malaria screening in resource-constrained environments. Deploying standard convolutional architectures here introduces severe adversarial vulnerabilities. Post-Training Quantization (PTQ) mitigates hardware constraints by converting floating-point models to 8-bit integers (INT8); however, its impact on clinical safety and security remains unexplored. This study presents an adversarial audit of quantized Vision Transformers for medical edge deployment. We evaluated a Swin-Tiny transformer against ViT-Tiny and MobileNetV3 baselines using a 27,558-image malaria dataset and an out-of-distribution (OOD) White Blood Cell dataset. Our findings redefine the &ldquo;Quantization Shield&rdquo; hypothesis. PTQ compresses the Swin model by 3.9&times; (to 27.89 MB) with a negligible 0.11% accuracy drop, maintaining statistical reliability on OOD tests. However, the hypothesized architectural resilience shatters under white-box Projected Gradient Descent (PGD) attacks. Despite robustness against single-step attacks, both MobileNetV3 and the INT8 Swin-Tiny collapse to 0.00% accuracy under iterative PGD. Conversely, the quantized Swin-Tiny resists black-box transfer attacks from a surrogate, maintaining 81.00% accuracy. We conclude that while quantized Vision Transformers meet mobile sensor constraints, integer quantization provides zero innate defense against targeted iterative perturbations, exposing a critical vulnerability in diagnostic IoT networks.</p>
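	<p>The iterative attack referenced here is standard projected gradient descent; the sketch below shows the canonical L-infinity PGD loop in PyTorch. The eps, step size, and iteration count are common illustrative defaults, not necessarily the audit's exact settings.</p>
	<pre><code>import torch

def pgd_attack(model, x, y, eps=8/255, alpha=2/255, steps=10):
    """Canonical L-infinity PGD: repeated signed-gradient ascent on the loss,
    projected back into the eps-ball around x. eps/alpha/steps are common
    illustrative defaults, not the audit's exact settings."""
    loss_fn = torch.nn.CrossEntropyLoss()
    # random start inside the eps-ball, clipped to the valid image range
    x_adv = (x + torch.empty_like(x).uniform_(-eps, eps)).clamp(0, 1)
    for _ in range(steps):
        x_adv = x_adv.detach().requires_grad_(True)
        grad = torch.autograd.grad(loss_fn(model(x_adv), y), x_adv)[0]
        x_adv = x_adv.detach() + alpha * grad.sign()      # ascend the loss
        x_adv = (x + (x_adv - x).clamp(-eps, eps)).clamp(0, 1)  # project
    return x_adv.detach()

# usage (hypothetical names): adv = pgd_attack(net, images, labels)
</code></pre>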
	]]></content:encoded>

	<dc:title>Evaluating the Adversarial Robustness and Clinical Safety of Quantized Hierarchical Transformers for Edge-Based Malaria Microscopy</dc:title>
			<dc:creator>Umar Hasan</dc:creator>
			<dc:creator>Turki G. Alghamdi</dc:creator>
			<dc:creator>Muhammad Ali Nayeem</dc:creator>
		<dc:identifier>doi: 10.3390/s26092888</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2888</prism:startingPage>
		<prism:doi>10.3390/s26092888</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2888</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2887">

	<title>Sensors, Vol. 26, Pages 2887: Precision Gas Sensing Interface Circuit with Digital Potentiometer-Based Dynamic Gain Control</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2887</link>
	<description>This paper proposes a digital potentiometer-based adaptive gas sensor interface for stable detection without signal saturation under extreme environmental fluctuations. Conventional fixed-gain circuits often suffer from limited dynamic range, leading to data loss when severe baseline drifts exceed ADC input limits. To address this, we developed a real-time control algorithm that actively adjusts attenuator and amplifier gains, maintaining the ADC input voltage (VADC) near the common-mode voltage (VCM). Experimental results demonstrate that the interface remains stable even when the buffer voltage reaches 2.75 V, significantly surpassing the 1.2 V ADC limit. Sensor resistance data, reconstructed by inversely calculating updated circuit parameters, achieved high accuracy with a Mean Absolute Percentage Error (MAPE) of 1.628% and a maximum relative error under 4.8%. Consequently, this study proves that logically extending the physically limited ADC dynamic range enables high-precision gas sensing in diverse environments without requiring high-performance computing devices. This approach provides a cost-effective and robust solution for compact IoT-based gas monitoring systems.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2887: Precision Gas Sensing Interface Circuit with Digital Potentiometer-Based Dynamic Gain Control</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2887">doi: 10.3390/s26092887</a></p>
	<p>Authors:
		Soon-Kyu Kwon
		Hyeon-June Kim
		</p>
	<p>This paper proposes a digital potentiometer-based adaptive gas sensor interface for stable detection without signal saturation under extreme environmental fluctuations. Conventional fixed-gain circuits often suffer from limited dynamic range, leading to data loss when severe baseline drifts exceed ADC input limits. To address this, we developed a real-time control algorithm that actively adjusts attenuator and amplifier gains, maintaining the ADC input voltage (V<sub>ADC</sub>) near the common-mode voltage (V<sub>CM</sub>). Experimental results demonstrate that the interface remains stable even when the buffer voltage reaches 2.75 V, significantly surpassing the 1.2 V ADC limit. Sensor resistance data, reconstructed by inversely calculating updated circuit parameters, achieved high accuracy with a Mean Absolute Percentage Error (MAPE) of 1.628% and a maximum relative error under 4.8%. Consequently, this study proves that logically extending the physically limited ADC dynamic range enables high-precision gas sensing in diverse environments without requiring high-performance computing devices. This approach provides a cost-effective and robust solution for compact IoT-based gas monitoring systems.</p>
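	<p>A minimal sketch of the control idea follows: each cycle, the programmable gain is nudged so that the ADC input stays near the common-mode voltage instead of saturating. The gain limits, step ratio, and tolerance are hypothetical values, not the paper's design parameters.</p>
	<pre><code>def update_gain(v_adc, v_cm, gain, g_min=0.1, g_max=16.0, ratio=1.25, tol=0.1):
    """Toy version of the adaptive rule: keep the ADC input near the
    common-mode voltage by retuning the programmable gain each cycle.
    All limits, the step ratio, and the tolerance are illustrative,
    not the paper's values."""
    if abs(v_adc - v_cm) <= tol:
        return gain                        # inside the comfort band: hold
    if v_adc > v_cm:
        return max(g_min, gain / ratio)    # drifting toward saturation: attenuate
    return min(g_max, gain * ratio)        # signal too small: amplify

# Offline, the sensor resistance would then be reconstructed by inverting the
# (known) gain chain with the gain value active at each sample.
print(update_gain(v_adc=1.1, v_cm=0.6, gain=4.0))   # 3.2
</code></pre>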
	]]></content:encoded>

	<dc:title>Precision Gas Sensing Interface Circuit with Digital Potentiometer-Based Dynamic Gain Control</dc:title>
			<dc:creator>Soon-Kyu Kwon</dc:creator>
			<dc:creator>Hyeon-June Kim</dc:creator>
		<dc:identifier>doi: 10.3390/s26092887</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2887</prism:startingPage>
		<prism:doi>10.3390/s26092887</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2887</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2886">

	<title>Sensors, Vol. 26, Pages 2886: A Scene Detection Complexity Metric for Infrared Small Target Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2886</link>
	<description>Infrared small target detection is widely used in aerospace surveillance, maritime search and rescue, and military reconnaissance. However, the performance of detection algorithms is highly dependent on scene characteristics, and methods that perform well in simple backgrounds may degrade substantially in complex environments. Existing indicators, such as information entropy, average gradient, and peak signal-to-noise ratio, can reflect detection difficulty from individual perspectives, but they do not provide a unified measure that jointly considers target saliency, background complexity, and target&amp;ndash;background coupling. To address this issue, this study proposes a scene detection complexity (SDC) metric for quantifying the difficulty of infrared small target detection. Six basic indicators are selected from three dimensions, namely target saliency, background complexity, and target&amp;ndash;background coupling: statistical variance, target&amp;ndash;background contrast, signal-to-clutter ratio, information entropy, structural similarity, and target size. After Min&amp;ndash;Max normalization, objective weights are determined by combining the entropy weight method and principal component analysis, and the weighted indicators are fused into an SDC value in the range of [0,1]. Experiments on 100 test images selected from IRST640, MSISTD, SIRST-V2, and an infrared small-aircraft sequence dataset show that the proposed SDC achieves a Pearson linear correlation coefficient of 0.956 with subjective difficulty ratings and &amp;minus;0.902 with image-level detection scores obtained from seven representative algorithms. The results further indicate that traditional methods are more sensitive to increasing scene complexity, whereas deep-learning-based methods are comparatively more robust in complex backgrounds. The proposed SDC provides a unified and objective tool for performance evaluation, algorithm selection, and pre-assessment of scene difficulty in infrared small target detection.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2886: A Scene Detection Complexity Metric for Infrared Small Target Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2886">doi: 10.3390/s26092886</a></p>
	<p>Authors:
		Zhiyuan Huang
		Zhiyong Zhang
		</p>
	<p>Infrared small target detection is widely used in aerospace surveillance, maritime search and rescue, and military reconnaissance. However, the performance of detection algorithms is highly dependent on scene characteristics, and methods that perform well in simple backgrounds may degrade substantially in complex environments. Existing indicators, such as information entropy, average gradient, and peak signal-to-noise ratio, can reflect detection difficulty from individual perspectives, but they do not provide a unified measure that jointly considers target saliency, background complexity, and target&ndash;background coupling. To address this issue, this study proposes a scene detection complexity (SDC) metric for quantifying the difficulty of infrared small target detection. Six basic indicators are selected from three dimensions, namely target saliency, background complexity, and target&ndash;background coupling: statistical variance, target&ndash;background contrast, signal-to-clutter ratio, information entropy, structural similarity, and target size. After Min&ndash;Max normalization, objective weights are determined by combining the entropy weight method and principal component analysis, and the weighted indicators are fused into an SDC value in the range of [0,1]. Experiments on 100 test images selected from IRST640, MSISTD, SIRST-V2, and an infrared small-aircraft sequence dataset show that the proposed SDC achieves a Pearson linear correlation coefficient of 0.956 with subjective difficulty ratings and &minus;0.902 with image-level detection scores obtained from seven representative algorithms. The results further indicate that traditional methods are more sensitive to increasing scene complexity, whereas deep-learning-based methods are comparatively more robust in complex backgrounds. The proposed SDC provides a unified and objective tool for performance evaluation, algorithm selection, and pre-assessment of scene difficulty in infrared small target detection.</p>
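	<p>As a sketch of the weighting-and-fusion step, the snippet below applies Min&ndash;Max normalization and the standard entropy weight method, then fuses the indicators into a single score in [0,1]. It covers only the entropy-weight half of the paper's weighting (the PCA combination is omitted), and the random data are placeholders.</p>
	<pre><code>import numpy as np

def entropy_weights(X):
    """Standard entropy weight method on a (scenes x indicators) matrix that
    is already Min-Max normalized to [0, 1]; PCA-based fusion is omitted."""
    P = X / (X.sum(axis=0, keepdims=True) + 1e-12)        # column proportions
    n = X.shape[0]
    E = -(P * np.log(P + 1e-12)).sum(axis=0) / np.log(n)  # per-indicator entropy
    d = 1.0 - E                                           # degree of divergence
    return d / d.sum()

rng = np.random.default_rng(0)
raw = rng.random((100, 6))                           # 6 indicators, 100 scenes
X = (raw - raw.min(0)) / (raw.max(0) - raw.min(0))   # Min-Max normalization
w = entropy_weights(X)
sdc = X @ w                                          # fused SDC value in [0, 1]
print(w.round(3), round(float(sdc.min()), 3), round(float(sdc.max()), 3))
</code></pre>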
	]]></content:encoded>

	<dc:title>A Scene Detection Complexity Metric for Infrared Small Target Detection</dc:title>
			<dc:creator>Zhiyuan Huang</dc:creator>
			<dc:creator>Zhiyong Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092886</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2886</prism:startingPage>
		<prism:doi>10.3390/s26092886</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2886</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2884">

	<title>Sensors, Vol. 26, Pages 2884: Robust Rear-View Human Tracking for Robotic Visual Sensing: A Spatiotemporal Prediction and Multi-Modal Fusion Approach</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2884</link>
	<description>Rear-view human tracking and re-identification remain critical challenges for robotic visual sensing in unmanned vehicles, particularly under adverse weather conditions and severe occlusion. Conventional deep learning models often suffer from feature contamination and trajectory drift under dynamic illumination. To overcome these bottlenecks, we propose a lightweight tracking framework driven by spatiotemporal prediction and multimodal feature fusion. Specifically, an ego-motion-aware Kalman prediction mechanism maintains temporal continuity during complete occlusions. Upon target reappearance, a multi-factor descriptor&amp;mdash;fusing color histograms with geometric constraints&amp;mdash;is employed within a dynamic Mahalanobis search region. This is coupled with a specular-reflection-penalized adaptive learning rate (&amp;eta;k) that actively freezes template updates during severe environmental degradation. Evaluated on a custom Mecanum-wheeled robot, the proposed method achieves a peak precision of 94.2% and a tracking success rate of 93.4%. Extensive experiments in extreme rainy night scenarios demonstrate a 35% reduction in average tracking error, maintaining a Center Location Error (CLE) below 11 pixels. Furthermore, the system achieves a rapid target re-identification response of 72.83 ms during occlusion phases. Ultimately, this framework delivers a highly robust and real-time solution for autonomous navigation in complex dynamic environments.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2884: Robust Rear-View Human Tracking for Robotic Visual Sensing: A Spatiotemporal Prediction and Multi-Modal Fusion Approach</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2884">doi: 10.3390/s26092884</a></p>
	<p>Authors:
		Xu Jia
		Jia Xie
		Yongguo Li
		Jintao Liang
		Zengmin Zhang
		</p>
	<p>Rear-view human tracking and re-identification remain critical challenges for robotic visual sensing in unmanned vehicles, particularly under adverse weather conditions and severe occlusion. Conventional deep learning models often suffer from feature contamination and trajectory drift under dynamic illumination. To overcome these bottlenecks, we propose a lightweight tracking framework driven by spatiotemporal prediction and multimodal feature fusion. Specifically, an ego-motion-aware Kalman prediction mechanism maintains temporal continuity during complete occlusions. Upon target reappearance, a multi-factor descriptor&mdash;fusing color histograms with geometric constraints&mdash;is employed within a dynamic Mahalanobis search region. This is coupled with a specular-reflection-penalized adaptive learning rate (&eta;<sub>k</sub>) that actively freezes template updates during severe environmental degradation. Evaluated on a custom Mecanum-wheeled robot, the proposed method achieves a peak precision of 94.2% and a tracking success rate of 93.4%. Extensive experiments in extreme rainy night scenarios demonstrate a 35% reduction in average tracking error, maintaining a Center Location Error (CLE) below 11 pixels. Furthermore, the system achieves a rapid target re-identification response of 72.83 ms during occlusion phases. Ultimately, this framework delivers a highly robust and real-time solution for autonomous navigation in complex dynamic environments.</p>
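	<p>One concrete piece of such a pipeline is the Mahalanobis gate used to validate candidate re-detections against the Kalman prediction; a minimal NumPy version follows. The innovation covariance and the chi-square gate threshold (99%, 2 degrees of freedom) are illustrative choices, not the paper's tuned values.</p>
	<pre><code>import numpy as np

def mahalanobis_gate(z, z_pred, S, gate=9.21):
    """Accept a candidate measurement z only if it falls inside the predicted
    Mahalanobis gate. z_pred and the innovation covariance S would come from
    the Kalman filter; 9.21 is the chi-square 99% value for 2 degrees of
    freedom (an illustrative threshold)."""
    d = z - z_pred
    m2 = float(d @ np.linalg.solve(S, d))   # squared Mahalanobis distance
    return m2 <= gate, m2

S = np.diag([25.0, 25.0])                   # example pixel-variance innovation
ok, m2 = mahalanobis_gate(np.array([210.0, 118.0]), np.array([200.0, 120.0]), S)
print(ok, round(m2, 2))                     # True 4.16
</code></pre>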
	]]></content:encoded>

	<dc:title>Robust Rear-View Human Tracking for Robotic Visual Sensing: A Spatiotemporal Prediction and Multi-Modal Fusion Approach</dc:title>
			<dc:creator>Xu Jia</dc:creator>
			<dc:creator>Jia Xie</dc:creator>
			<dc:creator>Yongguo Li</dc:creator>
			<dc:creator>Jintao Liang</dc:creator>
			<dc:creator>Zengmin Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092884</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2884</prism:startingPage>
		<prism:doi>10.3390/s26092884</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2884</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2885">

	<title>Sensors, Vol. 26, Pages 2885: Communication-Efficient Federated Learning with Dual-Sided Sparse Aggregation for Edge Sensing Systems</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2885</link>
	<description>Distributed edge sensing systems, such as IoT monitoring nodes, wearable devices, and camera-based sensing terminals, continuously generate privacy-sensitive data that are costly to transmit to a central server. Federated learning (FL) provides a promising solution for collaborative model training without raw-data sharing; however, its practical deployment in edge sensing systems is challenged by non-IID local observations, limited uplink/downlink resources, and restricted on-device computation. To address these issues, this paper proposes a Dual-Sided Sparse Aggregation (DSSA) mechanism integrated with FedProx for resource-constrained edge sensing environments. In the proposed framework, the server prunes the global model after each communication round and transmits only the retained parameters, while clients update the complementary parameters and upload sparse local gradients. This fixed-structure sparse training strategy reduces bidirectional communication overhead and local computation cost, while FedProx improves robustness under heterogeneous data distributions. Experiments on CIFAR-10 and SVHN with varying non-IID degrees, pruning ratios, and hyperparameter settings show that the proposed method achieves a favorable resource-performance trade-off, reducing communication cost by up to 73.0% and computation cost by up to 34.9% while maintaining competitive accuracy. Compared with FedAvg under controlled benchmark settings, the savings are most pronounced in mildly heterogeneous scenarios, confirming the method&amp;rsquo;s suitability for resource-constrained edge sensing under the evaluated settings.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2885: Communication-Efficient Federated Learning with Dual-Sided Sparse Aggregation for Edge Sensing Systems</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2885">doi: 10.3390/s26092885</a></p>
	<p>Authors:
		He Zhao
		Jingwei Li
		</p>
	<p>Distributed edge sensing systems, such as IoT monitoring nodes, wearable devices, and camera-based sensing terminals, continuously generate privacy-sensitive data that are costly to transmit to a central server. Federated learning (FL) provides a promising solution for collaborative model training without raw-data sharing; however, its practical deployment in edge sensing systems is challenged by non-IID local observations, limited uplink/downlink resources, and restricted on-device computation. To address these issues, this paper proposes a Dual-Sided Sparse Aggregation (DSSA) mechanism integrated with FedProx for resource-constrained edge sensing environments. In the proposed framework, the server prunes the global model after each communication round and transmits only the retained parameters, while clients update the complementary parameters and upload sparse local gradients. This fixed-structure sparse training strategy reduces bidirectional communication overhead and local computation cost, while FedProx improves robustness under heterogeneous data distributions. Experiments on CIFAR-10 and SVHN with varying non-IID degrees, pruning ratios, and hyperparameter settings show that the proposed method achieves a favorable resource-performance trade-off, reducing communication cost by up to 73.0% and computation cost by up to 34.9% while maintaining competitive accuracy. Compared with FedAvg under controlled benchmark settings, the savings are most pronounced in mildly heterogeneous scenarios, confirming the method&rsquo;s suitability for resource-constrained edge sensing under the evaluated settings.</p>
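	<p>To illustrate the dual-sided sparsity, the sketch below uses generic global magnitude pruning: the server keeps and transmits only the largest-magnitude parameters, while clients touch the complementary set. The pruning rule, keep ratio, and toy update are assumptions; the paper's fixed-structure scheme may differ.</p>
	<pre><code>import numpy as np

def prune_mask(weights, keep_ratio=0.5):
    """Global magnitude pruning: keep the largest-magnitude fraction of
    parameters. A generic stand-in for the server-side pruning step."""
    flat = np.abs(weights).ravel()
    k = max(1, int(keep_ratio * flat.size))
    thresh = np.partition(flat, -k)[-k]
    return np.abs(weights) >= thresh

w_global = np.random.default_rng(0).standard_normal((4, 4))
mask = prune_mask(w_global, keep_ratio=0.25)
server_payload = w_global[mask]          # server sends only retained parameters
client_update = np.zeros_like(w_global)
client_update[~mask] = 0.01              # clients update the complementary set (toy)
print(int(mask.sum()), "of", mask.size, "weights transmitted downlink")
</code></pre>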
	]]></content:encoded>

	<dc:title>Communication-Efficient Federated Learning with Dual-Sided Sparse Aggregation for Edge Sensing Systems</dc:title>
			<dc:creator>He Zhao</dc:creator>
			<dc:creator>Jingwei Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092885</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2885</prism:startingPage>
		<prism:doi>10.3390/s26092885</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2885</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2883">

	<title>Sensors, Vol. 26, Pages 2883: A 33 GHz Conformal Phased-Array Radar with Linearly Constrained Minimum Variance Digital Beamforming, Circular-Polarization Filtering, and Neural-Network Micro-Doppler Classification for Counter-UAS Applications</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2883</link>
	<description>A compact millimeter-wave radar system operating at 33 GHz is presented for integration on small unmanned aerial systems (UAS) and for ground-based counter-UAS reconnaissance. The design is specifically motivated by civil-sector agricultural applications, where large-payload crop-dusting and precision-spraying drones operating under FAA 14 CFR Part 137 require lightweight sense-and-avoid radar that conforms aerodynamically to existing aircraft or ground vehicles. The system is based on a 36-element hemispherical conformal phased array of crossed half-wave dipole radiators that generates right-hand circular polarization (RHCP) on transmit and selectively receives left-hand circular polarization (LHCP) echoes from targets, providing passive first-stage suppression of co-polarized rain and ground clutter. A Linearly Constrained Minimum Variance (LCMV) digital beamformer, applied to per-element analog-to-digital converter (ADC) outputs, delivers closed-form beam weights that enforce a distortionless response at each scan direction while globally minimizing sidelobe power. The formulation resolves the main-beam drift caused by the ill-conditioned re-scaling step in iterative Chebyshev tapering, achieving sidelobe levels below &amp;minus;20 dB with main-beam peaks within 0.1&amp;deg; of their commanded angles across all evaluated positions. Mutual coupling between array elements is modeled analytically using the induced-EMF method, yielding a 36&amp;times;36 impedance matrix whose off-diagonal entries are at most 8.2% of the element self-impedance at the minimum inter-element separation of 2.70 &amp;lambda;. A closed-form decoupling matrix is applied to the receive manifold prior to LCMV weight computation. Seven simultaneous independent receive beams covering 0&amp;deg;&amp;ndash;60&amp;deg; elevation are formed from a single data snapshot. A Scaled Conjugate Gradient neural network classifier, trained on radar-equation-scaled micro-Doppler features following Swerling I&amp;ndash;IV radar cross-section (RCS) fluctuation statistics, achieves overall classification accuracy above 85% across five target classes. The five classes comprise two bird-signature classes (SW-I and SW-II), two UAV-signature classes (SW-III and SW-IV), and a clutter class. The design is entirely simulation-based; experimental validation using a sub-array prototype is identified as the primary direction for future work.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2883: A 33 GHz Conformal Phased-Array Radar with Linearly Constrained Minimum Variance Digital Beamforming, Circular-Polarization Filtering, and Neural-Network Micro-Doppler Classification for Counter-UAS Applications</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2883">doi: 10.3390/s26092883</a></p>
	<p>Authors:
		Michael Baginski
		</p>
	<p>A compact millimeter-wave radar system operating at 33 GHz is presented for integration on small unmanned aerial systems (UAS) and for ground-based counter-UAS reconnaissance. The design is specifically motivated by civil-sector agricultural applications, where large-payload crop-dusting and precision-spraying drones operating under FAA 14 CFR Part 137 require lightweight sense-and-avoid radar that conforms aerodynamically to existing aircraft or ground vehicles. The system is based on a 36-element hemispherical conformal phased array of crossed half-wave dipole radiators that generates right-hand circular polarization (RHCP) on transmit and selectively receives left-hand circular polarization (LHCP) echoes from targets, providing passive first-stage suppression of co-polarized rain and ground clutter. A Linearly Constrained Minimum Variance (LCMV) digital beamformer, applied to per-element analog-to-digital converter (ADC) outputs, delivers closed-form beam weights that enforce a distortionless response at each scan direction while globally minimizing sidelobe power. The formulation resolves the main-beam drift caused by the ill-conditioned re-scaling step in iterative Chebyshev tapering, achieving sidelobe levels below &minus;20 dB with main-beam peaks within 0.1&deg; of their commanded angles across all evaluated positions. Mutual coupling between array elements is modeled analytically using the induced-EMF method, yielding a 36&times;36 impedance matrix whose off-diagonal entries are at most 8.2% of the element self-impedance at the minimum inter-element separation of 2.70 &lambda;. A closed-form decoupling matrix is applied to the receive manifold prior to LCMV weight computation. Seven simultaneous independent receive beams covering 0&deg;&ndash;60&deg; elevation are formed from a single data snapshot. A Scaled Conjugate Gradient neural network classifier, trained on radar-equation-scaled micro-Doppler features following Swerling I&ndash;IV radar cross-section (RCS) fluctuation statistics, achieves overall classification accuracy above 85% across five target classes. The five classes comprise two bird-signature classes (SW-I and SW-II), two UAV-signature classes (SW-III and SW-IV), and a clutter class. The design is entirely simulation-based; experimental validation using a sub-array prototype is identified as the primary direction for future work.</p>
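	<p>For reference, the LCMV weights have the closed form w = R<sup>&minus;1</sup>C(C<sup>H</sup>R<sup>&minus;1</sup>C)<sup>&minus;1</sup>f, which minimizes output power subject to the constraints C<sup>H</sup>w = f; the NumPy sketch below evaluates it for a simple distortionless-response case. The 8-element line array and identity covariance are illustrative, not the 36-element conformal design.</p>
	<pre><code>import numpy as np

def lcmv_weights(R, C, f):
    """Closed-form LCMV: w = R^-1 C (C^H R^-1 C)^-1 f minimizes output power
    subject to C^H w = f. R is the (n x n) covariance, C the (n x m)
    constraint matrix, f the (m x 1) response vector."""
    RiC = np.linalg.solve(R, C)                      # R^-1 C
    return RiC @ np.linalg.solve(C.conj().T @ RiC, f)

# Example: 8-element half-wavelength line array, distortionless at 20 degrees.
n, d = 8, 0.5
theta = np.deg2rad(20.0)
a = np.exp(2j * np.pi * d * np.arange(n) * np.sin(theta))[:, None]  # steering
R = np.eye(n, dtype=complex)          # white-noise covariance stand-in
w = lcmv_weights(R, C=a, f=np.array([[1.0 + 0j]]))
print(np.abs(w.conj().T @ a))         # [[1.]] -> unit gain at the look angle
</code></pre>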
	]]></content:encoded>

	<dc:title>A 33 GHz Conformal Phased-Array Radar with Linearly Constrained Minimum Variance Digital Beamforming, Circular-Polarization Filtering, and Neural-Network Micro-Doppler Classification for Counter-UAS Applications</dc:title>
			<dc:creator>Michael Baginski</dc:creator>
		<dc:identifier>doi: 10.3390/s26092883</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2883</prism:startingPage>
		<prism:doi>10.3390/s26092883</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2883</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2882">

	<title>Sensors, Vol. 26, Pages 2882: Machine Learning for Intelligent and Adaptive Communication Systems: From Optimization to Emerging Paradigms</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2882</link>
	<description>Machine learning (ML) has been increasingly considered for various communication applications, demonstrating promising feasibility and effectiveness in enhancing system intelligence, adaptability, and operational efficiency [...]</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2882: Machine Learning for Intelligent and Adaptive Communication Systems: From Optimization to Emerging Paradigms</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2882">doi: 10.3390/s26092882</a></p>
	<p>Authors:
		Haeyoung Lee
		Yichuang Sun
		Oluyomi Simpson
		</p>
	<p>Machine learning (ML) has been increasingly considered for various communication applications, demonstrating promising feasibility and effectiveness in enhancing system intelligence, adaptability, and operational efficiency [...]</p>
	]]></content:encoded>

	<dc:title>Machine Learning for Intelligent and Adaptive Communication Systems: From Optimization to Emerging Paradigms</dc:title>
			<dc:creator>Haeyoung Lee</dc:creator>
			<dc:creator>Yichuang Sun</dc:creator>
			<dc:creator>Oluyomi Simpson</dc:creator>
		<dc:identifier>doi: 10.3390/s26092882</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>2882</prism:startingPage>
		<prism:doi>10.3390/s26092882</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2882</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2881">

	<title>Sensors, Vol. 26, Pages 2881: Adaptive Control of the Redundant Axis of a Surgical Robot for Operating Room Workspace Optimization Using Reinforcement Learning</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2881</link>
	<description>Laparoscopy is one of the most widely used surgical techniques in clinical practice. However, its practice is associated with medium- and long-term musculoskeletal disorders in surgeons. In this context, robot-assisted surgery has emerged as a promising approach for mitigating ergonomic constraints while enhancing control and precision during laparoscope manipulation. Despite these advances, existing research predominantly focuses on robotic control strategies, whereas the study of human&amp;ndash;robot interaction in the operating room remains comparatively underexplored. This paper presents a proof-of-concept framework for workspace-aware posture adaptation in collaborative surgical robotics. The proposed approach combines vision-based human activity recognition with reinforcement learning to control the shoulder&amp;ndash;elbow&amp;ndash;wrist redundant angle of a seven-degree-of-freedom manipulator holding a laparoscope. Based on the detected interaction context, the system distinguishes between controlling, observing, cutting, and blocked states. During the observation and cutting phases, the controller allows the robot&amp;rsquo;s posture to be reconfigured so that it tilts away from the human operator while maintaining the position of the laparoscope; when the surgeon moves away, the robot gradually returns to its default configuration. Two reward formulations, dense and fuzzy, are compared. Real-world experiments show that both approaches learn the desired reflexive behavior, while the fuzzy reward yields improved training stability and more consistent real-system performance, increasing workspace availability around the surgeon.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2881: Adaptive Control of the Redundant Axis of a Surgical Robot for Operating Room Workspace Optimization Using Reinforcement Learning</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2881">doi: 10.3390/s26092881</a></p>
	<p>Authors:
		Irati Renedo-Alonso
		Juan A. Sánchez-Margallo
		Nestor Arana-Arexolaleiba
		Íñigo Elguea-Aguinaco
		</p>
	<p>Laparoscopy is one of the most widely used surgical techniques in clinical practice. However, its practice is associated with medium- and long-term musculoskeletal disorders in surgeons. In this context, robot-assisted surgery has emerged as a promising approach for mitigating ergonomic constraints while enhancing control and precision during laparoscope manipulation. Despite these advances, existing research predominantly focuses on robotic control strategies, whereas the study of human&ndash;robot interaction in the operating room remains comparatively underexplored. This paper presents a proof-of-concept framework for workspace-aware posture adaptation in collaborative surgical robotics. The proposed approach combines vision-based human activity recognition with reinforcement learning to control the shoulder&ndash;elbow&ndash;wrist redundant angle of a seven-degree-of-freedom manipulator holding a laparoscope. Based on the detected interaction context, the system distinguishes between controlling, observing, cutting, and blocked states. During the observation and cutting phases, the controller allows the robot&rsquo;s posture to be reconfigured so that it tilts away from the human operator while maintaining the position of the laparoscope; when the surgeon moves away, the robot gradually returns to its default configuration. Two reward formulations, dense and fuzzy, are compared. Real-world experiments show that both approaches learn the desired reflexive behavior, while the fuzzy reward yields improved training stability and more consistent real-system performance, increasing workspace availability around the surgeon.</p>
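	<p>As a toy contrast between the two reward formulations, the sketch below pairs a dense negative-error reward with a fuzzy-membership-style reward for the redundant-angle error. The band edges and shapes are hypothetical; the paper's actual reward functions are not reproduced here.</p>
	<pre><code>import numpy as np

def dense_reward(angle_err):
    """Dense shaping: negative absolute error of the redundant angle (toy)."""
    return -abs(angle_err)

def fuzzy_reward(angle_err, good=0.1, bad=0.6):
    """Fuzzy-style shaping: full reward inside a 'good' band, zero beyond a
    'bad' band, linear membership in between. Band edges (radians) are
    hypothetical; the paper's membership functions are not reproduced."""
    e = abs(angle_err)
    return float(np.clip((bad - e) / (bad - good), 0.0, 1.0))

print(dense_reward(0.3), fuzzy_reward(0.3))   # -0.3 0.6
</code></pre>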
	]]></content:encoded>

	<dc:title>Adaptive Control of the Redundant Axis of a Surgical Robot for Operating Room Workspace Optimization Using Reinforcement Learning</dc:title>
			<dc:creator>Irati Renedo-Alonso</dc:creator>
			<dc:creator>Juan A. Sánchez-Margallo</dc:creator>
			<dc:creator>Nestor Arana-Arexolaleiba</dc:creator>
			<dc:creator>Íñigo Elguea-Aguinaco</dc:creator>
		<dc:identifier>doi: 10.3390/s26092881</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2881</prism:startingPage>
		<prism:doi>10.3390/s26092881</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2881</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2879">

	<title>Sensors, Vol. 26, Pages 2879: Fine-Grained Perception for Fundus and Prostate Medical Image Segmentation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2879</link>
	<description>Traditional deep learning-based models have achieved promising results in medical image segmentation. However, their performance degrades severely when applied to unseen domains due to variations in imaging protocols, acquisition devices, and patient populations across medical centers, which lead to significant distribution shifts. With the emergence of the Segment Anything Model (SAM), a single model now exhibits significantly improved generalization and adaptability to various image types. Nevertheless, while SAM has learned structural representations from large-scale natural images, it lacks fine-grained structural knowledge specific to the medical imaging domain, remaining relatively invariant across imaging domains. In addition, its structural enhancement is vulnerable to unreliable prompts, and patch-wise inference disrupts structural continuity, leading to suboptimal performance in capturing anatomical details. To address this, we propose a novel Medical Fine-grained Segment Anything Model (termed MedFineSAM), which integrates three key modules: shared fine-grained structural enhancement, which extracts and selectively enhances fine-grained structural features shared between prompts and image embeddings via a structural dictionary; a prompt gating mechanism, which estimates prompt confidence and dynamically adjusts prompt weights to avoid erroneous enhancement; and a structural continuity diffusion in frequency domain (SCFD), which performs frequency-domain smoothing during decoding to alleviate structural discontinuity caused by patch aggregation. Experiments on the fundus benchmark and prostate MRI benchmark demonstrate superior generalization performance, offering new insights into leveraging SAM for single-source domain generalization in medical image segmentation.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2879: Fine-Grained Perception for Fundus and Prostate Medical Image Segmentation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2879">doi: 10.3390/s26092879</a></p>
	<p>Authors:
		Qiao Ba
		Jia-Xuan Jiang
		Yuee Li
		Zhong Wang
		</p>
	<p>Traditional deep learning-based models have achieved promising results in medical image segmentation. However, their performance degrades severely when applied to unseen domains due to variations in imaging protocols, acquisition devices, and patient populations across medical centers, which lead to significant distribution shifts. With the emergence of the Segment Anything Model (SAM), a single model now exhibits significantly improved generalization and adaptability to various image types. Nevertheless, while SAM has learned structural representations from large-scale natural images, it lacks fine-grained structural knowledge specific to the medical imaging domain, remaining relatively invariant across imaging domains. In addition, its structural enhancement is vulnerable to unreliable prompts, and patch-wise inference disrupts structural continuity, leading to suboptimal performance in capturing anatomical details. To address this, we propose a novel Medical Fine-grained Segment Anything Model (termed MedFineSAM), which integrates three key modules: shared fine-grained structural enhancement, which extracts and selectively enhances fine-grained structural features shared between prompts and image embeddings via a structural dictionary; a prompt gating mechanism, which estimates prompt confidence and dynamically adjusts prompt weights to avoid erroneous enhancement; and a structural continuity diffusion in frequency domain (SCFD), which performs frequency-domain smoothing during decoding to alleviate structural discontinuity caused by patch aggregation. Experiments on the fundus benchmark and prostate MRI benchmark demonstrate superior generalization performance, offering new insights into leveraging SAM for single-source domain generalization in medical image segmentation.</p>
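	<p>To give a feel for frequency-domain smoothing of patch-aggregated outputs, the snippet below low-passes a blocky 2-D map in the Fourier domain, softening seam discontinuities. It is a generic radial low-pass evoking the SCFD idea, not the paper's operator; the cutoff and test pattern are arbitrary.</p>
	<pre><code>import numpy as np

def frequency_smooth(logits, keep=0.25):
    """Low-pass a 2-D logit/feature map in the Fourier domain: keep the
    lowest `keep` fraction of frequencies and zero the rest. A generic
    smoother evoking the SCFD idea, not the paper's exact operator."""
    F = np.fft.fftshift(np.fft.fft2(logits))
    h, w = logits.shape
    yy, xx = np.ogrid[:h, :w]
    r = np.hypot(yy - h / 2, xx - w / 2)
    F[r > keep * min(h, w) / 2] = 0          # radial low-pass mask
    return np.fft.ifft2(np.fft.ifftshift(F)).real

# Blocky test pattern stands in for patch-aggregated logits with seams.
tile = np.kron(np.random.default_rng(0).random((4, 4)), np.ones((16, 16)))
smooth = frequency_smooth(tile)              # seams soften after smoothing
print(tile.shape, round(float(np.abs(tile - smooth).mean()), 3))
</code></pre>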
	]]></content:encoded>

	<dc:title>Fine-Grained Perception for Fundus and Prostate Medical Image Segmentation</dc:title>
			<dc:creator>Qiao Ba</dc:creator>
			<dc:creator>Jia-Xuan Jiang</dc:creator>
			<dc:creator>Yuee Li</dc:creator>
			<dc:creator>Zhong Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092879</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2879</prism:startingPage>
		<prism:doi>10.3390/s26092879</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2879</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2880">

	<title>Sensors, Vol. 26, Pages 2880: Eye Drift Signal Analysis Caused by Goggle Slippage in vHIT Measurements: A Signal Processing Perspective</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2880</link>
	<description>This Technical Report presents a quantitative signal processing approach to analyze and correct eye drift during vestibulo-ocular reflex (VOR) measurements using the video Head Impulse Test (vHIT). The objective is to determine the extent of drift caused by goggle slippage&amp;mdash;a technical artifact that can distort the VOR gain index. A total of 57 impulses were categorized into three protocols: Lateral, LARP, and RALP. For each impulse, peak head velocity and eye drift (estimated from the average velocity during the pre- and post-impulse rest periods) were extracted using a custom signal processing pipeline implemented in MATLAB R2020b and Python 3.11 (64-bit). Results showed the highest drift in the RALP group (&amp;minus;7.41 deg/s) and the lowest in the LARP group (&amp;minus;3.08 deg/s). The correlation between head velocity and drift was most prominent in the RALP group (r &amp;gt; 0.7), highlighting the impact of stimulation direction on goggle stability. This study proposes a drift detection method to be integrated into VOR correction algorithms, thereby enhancing gain analysis and saccade detection in automated systems.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2880: Eye Drift Signal Analysis Caused by Goggle Slippage in vHIT Measurements: A Signal Processing Perspective</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2880">doi: 10.3390/s26092880</a></p>
	<p>Authors:
		Ha Ngoc Khoan
		Le Ky Bien
		Tran Thi Nhan
		Tran Van Nghia
		</p>
	<p>This Technical Report presents a quantitative signal processing approach to analyze and correct eye drift during vestibulo-ocular reflex (VOR) measurements using the video Head Impulse Test (vHIT). The objective is to determine the extent of drift caused by goggle slippage&mdash;a technical artifact that can distort the VOR gain index. A total of 57 impulses were categorized into three protocols: Lateral, LARP, and RALP. For each impulse, peak head velocity and eye drift (estimated from the average velocity during the pre- and post-impulse rest periods) were extracted using a custom signal processing pipeline implemented in MATLAB R2020b and Python 3.11 (64-bit). Results showed the highest drift in the RALP group (&minus;7.41 deg/s) and the lowest in the LARP group (&minus;3.08 deg/s). The correlation between head velocity and drift was most prominent in the RALP group (r &gt; 0.7), highlighting the impact of stimulation direction on goggle stability. This study proposes a drift detection method to be integrated into VOR correction algorithms, thereby enhancing gain analysis and saccade detection in automated systems.</p>
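	<p>The drift estimate itself is simple to state in code: average the eye-velocity trace over the rest windows before and after the impulse, as sketched below. The sampling rate, window length, and indices are illustrative, not the report's exact pipeline.</p>
	<pre><code>import numpy as np

def eye_drift(eye_vel, fs, impulse_onset, impulse_end, rest=0.1):
    """Drift = mean eye velocity over the pre- and post-impulse rest windows,
    mirroring the averaging the report describes. Window length (s) and
    sample indices are illustrative."""
    n = int(rest * fs)
    pre = eye_vel[max(0, impulse_onset - n):impulse_onset]
    post = eye_vel[impulse_end:impulse_end + n]
    return float(np.mean(np.concatenate([pre, post])))

fs = 250                                    # Hz, example vHIT sampling rate
vel = -5.0 + np.random.default_rng(0).normal(0.0, 0.5, fs)  # 1 s drifting trace
print(round(eye_drift(vel, fs, impulse_onset=100, impulse_end=150), 2))
</code></pre>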
	]]></content:encoded>

	<dc:title>Eye Drift Signal Analysis Caused by Goggle Slippage in vHIT Measurements: A Signal Processing Perspective</dc:title>
			<dc:creator>Ha Ngoc Khoan</dc:creator>
			<dc:creator>Le Ky Bien</dc:creator>
			<dc:creator>Tran Thi Nhan</dc:creator>
			<dc:creator>Tran Van Nghia</dc:creator>
		<dc:identifier>doi: 10.3390/s26092880</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2880</prism:startingPage>
		<prism:doi>10.3390/s26092880</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2880</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2878">

	<title>Sensors, Vol. 26, Pages 2878: Data-Driven Slip Prediction in Web Processing Machines Using Virtual Sensors and Ensemble Machine Learning</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2878</link>
	<description>In roll-to-roll (R2R) web processing systems, traction rollers impose precise velocity profiles on the moving web. Ideally, the web follows this trajectory without deviation, but slip can occur during rapid acceleration or deceleration, leading to tension loss and degraded product quality. Although slip can be detected directly using high-resolution encoders that track the actual web speed, such sensors are expensive and require machine downtime for installation, making them impractical for large-scale industrial deployment. To overcome this limitation, we developed a virtual slip sensor that estimates slip using existing machine signals only. A temporary encoder was used to collect ground-truth data, enabling the training of predictive models that eliminate the need for a permanent physical sensor. The proposed system employs an ensemble modeling approach: a CatBoost model captures low-slip behavior where data is abundant, while a linear model extrapolates to high-slip, out-of-distribution conditions. Targeted feature engineering ensures generalization across varying ramp times and web speeds. Despite being trained primarily on data containing limited slip, the models successfully generalized to scenarios with severe slip, demonstrating robust predictive performance. The ensemble reduces the regular CatBoost model&amp;rsquo;s MSE at 60 m/min by approximately 54% in the speed-based evaluation and by approximately 68% in the quantile-based evaluation while maintaining comparable performance in the low-speed regimes. The resulting virtual sensor enables continuous real-time slip monitoring, providing operators with timely insights to prevent quality degradation and operate at higher acceleration profiles to increase throughput, even on machines that have not previously experienced extreme slip.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2878: Data-Driven Slip Prediction in Web Processing Machines Using Virtual Sensors and Ensemble Machine Learning</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2878">doi: 10.3390/s26092878</a></p>
	<p>Authors:
		Colin Soete
		Jonas Van Der Donckt
		Nathan Vandemoortele
		Jasper De Viaene
		Jeroen De Maeyer
		Sofie Van Hoecke
		</p>
	<p>In roll-to-roll (R2R) web processing systems, traction rollers impose precise velocity profiles on the moving web. Ideally, the web follows this trajectory without deviation, but slip can occur during rapid acceleration or deceleration, leading to tension loss and degraded product quality. Although slip can be detected directly using high-resolution encoders that track the actual web speed, such sensors are expensive and require machine downtime for installation, making them impractical for large-scale industrial deployment. To overcome this limitation, we developed a virtual slip sensor that estimates slip using existing machine signals only. A temporary encoder was used to collect ground-truth data, enabling the training of predictive models that eliminate the need for a permanent physical sensor. The proposed system employs an ensemble modeling approach: a CatBoost model captures low-slip behavior where data is abundant, while a linear model extrapolates to high-slip, out-of-distribution conditions. Targeted feature engineering ensures generalization across varying ramp times and web speeds. Despite being trained primarily on data containing limited slip, the models successfully generalized to scenarios with severe slip, demonstrating robust predictive performance. The ensemble reduces the regular CatBoost model&rsquo;s MSE at 60 m/min by approximately 54% in the speed-based evaluation and by approximately 68% in the quantile-based evaluation while maintaining comparable performance in the low-speed regimes. The resulting virtual sensor enables continuous real-time slip monitoring, providing operators with timely insights to prevent quality degradation and operate at higher acceleration profiles to increase throughput, even on machines that have not previously experienced extreme slip.</p>
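	<p>A minimal version of the ensembling idea appears below: blend a boosted-tree prediction (trusted in the data-rich low-slip regime) with a linear extrapolator (trusted out of distribution) via a smooth ramp on a regime indicator. The gating variable, ramp edges, and stand-in models are assumptions; the paper's blending may differ.</p>
	<pre><code>import numpy as np

def blended_slip(x, boosted, linear, v_lo=0.2, v_hi=0.8):
    """Blend a boosted-tree prediction (reliable where training data are
    abundant) with a linear extrapolator (better out of distribution) via a
    smooth ramp on a regime indicator in [0, 1]. The ramp edges and the
    indicator are illustrative, not the paper's gating."""
    alpha = np.clip((x - v_lo) / (v_hi - v_lo), 0.0, 1.0)  # 0 -> trees, 1 -> linear
    return (1 - alpha) * boosted(x) + alpha * linear(x)

boosted = lambda x: 0.02 * np.ones_like(x)   # stand-in for the CatBoost output
linear = lambda x: 0.05 + 0.3 * x            # stand-in linear extrapolation
x = np.linspace(0, 1, 5)                     # normalized severity indicator
print(blended_slip(x, boosted, linear).round(3))
</code></pre>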
	]]></content:encoded>

	<dc:title>Data-Driven Slip Prediction in Web Processing Machines Using Virtual Sensors and Ensemble Machine Learning</dc:title>
			<dc:creator>Colin Soete</dc:creator>
			<dc:creator>Jonas Van Der Donckt</dc:creator>
			<dc:creator>Nathan Vandemoortele</dc:creator>
			<dc:creator>Jasper De Viaene</dc:creator>
			<dc:creator>Jeroen De Maeyer</dc:creator>
			<dc:creator>Sofie Van Hoecke</dc:creator>
		<dc:identifier>doi: 10.3390/s26092878</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2878</prism:startingPage>
		<prism:doi>10.3390/s26092878</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2878</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2877">

	<title>Sensors, Vol. 26, Pages 2877: A Multimodal UAV-IoT Sensing Framework for Intelligent Pest Density Estimation in Smart Agricultural Systems</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2877</link>
	<description>Accurate estimation of dynamic environmental phenomena through intelligent sensing systems plays a critical role in enabling reliable monitoring and decision-making in complex real-world scenarios. With the rapid development of artificial intelligence-driven sensing technologies and Internet of Things systems, modern agricultural monitoring is evolving from isolated data acquisition toward intelligent, multimodal perception and decision-making. However, traditional approaches predominantly rely on single data sources, making it difficult to simultaneously capture plant phenotypic variations and environment-driven mechanisms, thereby limiting model applicability in complex field scenarios. To address this issue, a multimodal Pest Density Estimation Framework (PDEF) is proposed, which integrates UAV-based imagery, trap monitoring data, and environmental sensor measurements. In this framework, crop canopy damage features are extracted using convolutional neural networks, while temporal encoding is employed to model dynamic environmental variations. Cross-modal feature alignment and environment-aware enhancement mechanisms are further introduced to achieve deep integration of multi-source information, enabling the construction of a unified feature representation space and improving estimation accuracy. Extensive experiments conducted on a constructed multimodal agricultural dataset demonstrate that the proposed method achieves MAE, RMSE, and MAPE values of 5.47, 7.62, and 14.9%, respectively, significantly outperforming the Transformer-based fusion model (MAE 6.01, RMSE 8.16). Meanwhile, the coefficient of determination reaches R2=0.84, indicating superior fitting capability and stability. In multimodal combination experiments, the three-modality fusion reduces error metrics by more than 20% on average compared with single-modality models, validating the effectiveness of multi-source collaborative modeling. From the perspective of integrating plant phenotypic analysis and environmental perception, this study provides a novel AI-driven intelligent sensing framework for pest monitoring and crop management, contributing to improved pest prediction capability and enhanced intelligence in agricultural production systems. This study further provides practical implications for agricultural economics and supply chain optimization by enabling data-driven decision-making through intelligent sensing systems.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2877: A Multimodal UAV-IoT Sensing Framework for Intelligent Pest Density Estimation in Smart Agricultural Systems</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2877">doi: 10.3390/s26092877</a></p>
	<p>Authors:
		Yida Zhang
		Jianxi Chen
		Xin Zeng
		Runxi Chen
		Lirui Chen
		Shanhe Xiao
		Yihong Song
		</p>
	<p>Accurate estimation of dynamic environmental phenomena through intelligent sensing systems plays a critical role in enabling reliable monitoring and decision-making in complex real-world scenarios. With the rapid development of artificial intelligence-driven sensing technologies and Internet of Things systems, modern agricultural monitoring is evolving from isolated data acquisition toward intelligent, multimodal perception and decision-making. However, traditional approaches predominantly rely on single data sources, making it difficult to simultaneously capture plant phenotypic variations and environment-driven mechanisms, thereby limiting model applicability in complex field scenarios. To address this issue, a multimodal Pest Density Estimation Framework (PDEF) is proposed, which integrates UAV-based imagery, trap monitoring data, and environmental sensor measurements. In this framework, crop canopy damage features are extracted using convolutional neural networks, while temporal encoding is employed to model dynamic environmental variations. Cross-modal feature alignment and environment-aware enhancement mechanisms are further introduced to achieve deep integration of multi-source information, enabling the construction of a unified feature representation space and improving estimation accuracy. Extensive experiments conducted on a constructed multimodal agricultural dataset demonstrate that the proposed method achieves MAE, RMSE, and MAPE values of 5.47, 7.62, and 14.9%, respectively, significantly outperforming the Transformer-based fusion model (MAE 6.01, RMSE 8.16). Meanwhile, the coefficient of determination reaches R2=0.84, indicating superior fitting capability and stability. In multimodal combination experiments, the three-modality fusion reduces error metrics by more than 20% on average compared with single-modality models, validating the effectiveness of multi-source collaborative modeling. From the perspective of integrating plant phenotypic analysis and environmental perception, this study provides a novel AI-driven intelligent sensing framework for pest monitoring and crop management, contributing to improved pest prediction capability and enhanced intelligence in agricultural production systems. This study further provides practical implications for agricultural economics and supply chain optimization by enabling data-driven decision-making through intelligent sensing systems.</p>
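	<p>A minimal fusion-by-concatenation sketch of the kind of architecture described (CNN image features, a recurrent temporal encoding of environmental series, and trap counts feeding one regression head); module names and sizes are assumptions, not the published PDEF design.</p>
	<pre><code>
import torch
import torch.nn as nn

class TinyPDEF(nn.Module):
    def __init__(self, env_dim=8, trap_dim=4):
        super().__init__()
        self.img = nn.Sequential(                 # canopy-damage features
            nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())
        self.env = nn.GRU(env_dim, 16, batch_first=True)  # temporal encoding
        self.head = nn.Sequential(nn.Linear(16 + 16 + trap_dim, 32),
                                  nn.ReLU(), nn.Linear(32, 1))

    def forward(self, image, env_seq, trap):
        f_img = self.img(image)                   # (B, 16)
        _, h = self.env(env_seq)                  # h: (1, B, 16)
        return self.head(torch.cat([f_img, h[-1], trap], dim=1))

model = TinyPDEF()
density = model(torch.rand(2, 3, 64, 64),         # UAV image crops
                torch.rand(2, 24, 8),             # 24 steps of env readings
                torch.rand(2, 4))                 # trap-count features
</code></pre>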
	]]></content:encoded>

	<dc:title>A Multimodal UAV-IoT Sensing Framework for Intelligent Pest Density Estimation in Smart Agricultural Systems</dc:title>
			<dc:creator>Yida Zhang</dc:creator>
			<dc:creator>Jianxi Chen</dc:creator>
			<dc:creator>Xin Zeng</dc:creator>
			<dc:creator>Runxi Chen</dc:creator>
			<dc:creator>Lirui Chen</dc:creator>
			<dc:creator>Shanhe Xiao</dc:creator>
			<dc:creator>Yihong Song</dc:creator>
		<dc:identifier>doi: 10.3390/s26092877</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2877</prism:startingPage>
		<prism:doi>10.3390/s26092877</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2877</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2876">

	<title>Sensors, Vol. 26, Pages 2876: End-to-End Image Demosaicking via Region-Level Non-Local Modeling and Residual Aggregation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2876</link>
	<description>Image demosaicking aims to reconstruct a full-resolution color image from spatially sparse and interleaved color filter array (CFA) observations. Despite the significant progress achieved by deep learning-based methods, existing approaches have not fully addressed the sampling-structure-constrained nature of demosaicking. In particular, four-channel half-resolution packing may disrupt the CFA spatial phase relationships, while local convolutions and global non-local matching struggle to model reconstruction-relevant cross-position dependencies. To address these issues, this paper proposes an end-to-end image demosaicking network with region-level non-local modeling and residual aggregation (RNRA-Net). Instead of packing Bayer RAW data into a four-channel half-resolution representation, RNRA-Net decomposes the original mosaic image into a three-channel representation at the original resolution, thereby preserving the spatial arrangement of CFA sampling. To capture structurally related information, a region-level non-local module is introduced to compute feature correlations within spatially bounded regions, enabling aggregation of reconstruction-relevant contextual information. In addition, a residual aggregation module is developed to explicitly collect and refine early residual compensation features, facilitating the recovery of edges, textures, and high-frequency details. Extensive experiments on benchmark and high-resolution datasets demonstrate the effectiveness of RNRA-Net.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2876: End-to-End Image Demosaicking via Region-Level Non-Local Modeling and Residual Aggregation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2876">doi: 10.3390/s26092876</a></p>
	<p>Authors:
		Lingyun Wei
		Han Liu
		</p>
	<p>Image demosaicking aims to reconstruct a full-resolution color image from spatially sparse and interleaved color filter array (CFA) observations. Despite the significant progress achieved by deep learning-based methods, existing approaches have not fully addressed the sampling-structure-constrained nature of demosaicking. In particular, four-channel half-resolution packing may disrupt the CFA spatial phase relationships, while local convolutions and global non-local matching struggle to model reconstruction-relevant cross-position dependencies. To address these issues, this paper proposes an end-to-end image demosaicking network with region-level non-local modeling and residual aggregation (RNRA-Net). Instead of packing Bayer RAW data into a four-channel half-resolution representation, RNRA-Net decomposes the original mosaic image into a three-channel representation at the original resolution, thereby preserving the spatial arrangement of CFA sampling. To capture structurally related information, a region-level non-local module is introduced to compute feature correlations within spatially bounded regions, enabling aggregation of reconstruction-relevant contextual information. In addition, a residual aggregation module is developed to explicitly collect and refine early residual compensation features, facilitating the recovery of edges, textures, and high-frequency details. Extensive experiments on benchmark and high-resolution datasets demonstrate the effectiveness of RNRA-Net.</p>
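	<p>The three-channel decomposition can be sketched directly: each CFA sample is placed at its original position in the matching color plane, with zeros elsewhere. An RGGB phase is assumed here purely for illustration.</p>
	<pre><code>
import numpy as np

def bayer_to_three_channel(raw):
    """raw: (H, W) RGGB mosaic; returns (H, W, 3) with CFA phases preserved."""
    h, w = raw.shape
    out = np.zeros((h, w, 3), dtype=raw.dtype)
    out[0::2, 0::2, 0] = raw[0::2, 0::2]   # R at even rows/cols
    out[0::2, 1::2, 1] = raw[0::2, 1::2]   # G on R rows
    out[1::2, 0::2, 1] = raw[1::2, 0::2]   # G on B rows
    out[1::2, 1::2, 2] = raw[1::2, 1::2]   # B at odd rows/cols
    return out

three = bayer_to_three_channel(np.random.rand(8, 8))
</code></pre>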
	]]></content:encoded>

	<dc:title>End-to-End Image Demosaicking via Region-Level Non-Local Modeling and Residual Aggregation</dc:title>
			<dc:creator>Lingyun Wei</dc:creator>
			<dc:creator>Han Liu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092876</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2876</prism:startingPage>
		<prism:doi>10.3390/s26092876</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2876</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2875">

	<title>Sensors, Vol. 26, Pages 2875: Colorimetric Detection of Arsenic (III) and Mercury (II) Ions in Human Serum Albumin Samples Using Cysteine-Capped Gold Nanoparticles</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2875</link>
	<description>A continued interest in developing a low-cost, rapid screening method for quantifying Hg (II) and As (III) in biological samples stems from the toxic effects of human exposure to these heavy metal ions. This study reports the use of cysteine-capped gold nanoparticles (CysAuNPs) for chemical sensing, colorimetric detection, and quantification of As (III) and Hg (II) ions in human serum albumin (HSA) under physiological conditions. Zeta potential measurements indicated that the CysAuNPs have a negative surface charge, which was decreased in the presence of HSA and reversed to a positive value upon binding of As (III) and Hg (II) metal ions. Circular dichroism (CD) spectroscopy revealed changes in HSA conformation upon binding to As (III) and Hg (II) ions. X-ray fluorescence enables rapid qualitative screening for As (III) and Hg (II) ions before colorimetric quantification. The figures of merit (R2 ≥ 0.940) and the low detection limits (0.05 ppm for As (III) ions and 0.02 ppm for Hg (II)) in serum albumin demonstrate the high sensitivity of the method. The developed calibration curves correctly quantified the concentration of As (III) and Hg (II) ions of independently prepared test validation samples in HSA with an accuracy of ≥95% over a period of seven months without recalibrations, demonstrating the stability of CysAuNPs in solution and the robustness of the method for analysis of As (III) and Hg (II) ions in serum albumin.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2875: Colorimetric Detection of Arsenic (III) and Mercury (II) Ions in Human Serum Albumin Samples Using Cysteine-Capped Gold Nanoparticles</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2875">doi: 10.3390/s26092875</a></p>
	<p>Authors:
		Sayo O. Fakayode
		David K. Bwambok
		Eris Arth
		Ufuoma Benjamin
		Rebecca Huisman
		Allison Lugue
		Alex Tokos
		Kayley Owens
		Peter Rosado Flores
		</p>
	<p>A continued interest in developing a low-cost, rapid screening method for quantifying Hg (II) and As (III) in biological samples stems from the toxic effects of human exposure to these heavy metal ions. This study reports the use of cysteine-capped gold nanoparticles (CysAuNPs) for chemical sensing, colorimetric detection, and quantification of As (III) and Hg (II) ions in human serum albumin (HSA) under physiological conditions. Zeta potential measurements indicated that the CysAuNPs have a negative surface charge, which was decreased in the presence of HSA and reversed to a positive value upon binding of As (III) and Hg (II) metal ions. Circular dichroism (CD) spectroscopy revealed changes in HSA conformation upon binding to As (III) and Hg (II) ions. X-ray fluorescence enables rapid qualitative screening for As (III) and Hg (II) ions before colorimetric quantification. The figures of merit (R2 ≥ 0.940) and the low detection limits (0.05 ppm for As (III) ions and 0.02 ppm for Hg (II)) in serum albumin demonstrate the high sensitivity of the method. The developed calibration curves correctly quantified the concentration of As (III) and Hg (II) ions of independently prepared test validation samples in HSA with an accuracy of ≥95% over a period of seven months without recalibrations, demonstrating the stability of CysAuNPs in solution and the robustness of the method for analysis of As (III) and Hg (II) ions in serum albumin.</p>
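	<p>For readers unfamiliar with the calibration side, the sketch below fits a linear calibration curve and derives a detection limit with the common 3.3·sigma/slope convention; the data and the exact LOD procedure are assumptions, not the paper's.</p>
	<pre><code>
import numpy as np

rng = np.random.default_rng(1)
conc = np.array([0.0, 0.05, 0.1, 0.2, 0.4, 0.8])        # ppm (illustrative)
signal = 0.9 * conc + 0.02 + rng.normal(0, 0.005, conc.size)

slope, intercept = np.polyfit(conc, signal, 1)           # calibration line
residual_sd = np.std(signal - (slope * conc + intercept), ddof=2)
lod = 3.3 * residual_sd / slope                          # ICH-style LOD
print(f"slope={slope:.3f}, LOD ~ {lod:.3f} ppm")
</code></pre>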
	]]></content:encoded>

	<dc:title>Colorimetric Detection of Arsenic (III) and Mercury (II) Ions in Human Serum Albumin Samples Using Cysteine-Capped Gold Nanoparticles</dc:title>
			<dc:creator>Sayo O. Fakayode</dc:creator>
			<dc:creator>David K. Bwambok</dc:creator>
			<dc:creator>Eris Arth</dc:creator>
			<dc:creator>Ufuoma Benjamin</dc:creator>
			<dc:creator>Rebecca Huisman</dc:creator>
			<dc:creator>Allison Lugue</dc:creator>
			<dc:creator>Alex Tokos</dc:creator>
			<dc:creator>Kayley Owens</dc:creator>
			<dc:creator>Peter Rosado Flores</dc:creator>
		<dc:identifier>doi: 10.3390/s26092875</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2875</prism:startingPage>
		<prism:doi>10.3390/s26092875</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2875</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2873">

	<title>Sensors, Vol. 26, Pages 2873: Individual-Tree DBH Estimation from Airborne LiDAR Data Using MSFS–XGBoost</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2873</link>
	<description>Diameter at breast height (DBH) is a fundamental structural parameter for forest inventory and ecological analysis. However, field-based measurements (e.g., diameter tape surveys) are labor-intensive and inefficient for large-scale applications. Airborne light detection and ranging (LiDAR) provides an efficient alternative for individual-tree DBH estimation. Nevertheless, LiDAR-derived features, defined as statistical descriptors of point cloud structure and radiometric properties, are typically high-dimensional and redundant, which may degrade model performance. To address this issue, this study proposes an integrated framework combining Multi-Stage Feature Selection (MSFS) and Extreme Gradient Boosting (XGBoost) for DBH estimation. A total of 104 variables, including LiDAR-derived features (height, density, intensity, and canopy structure metrics) and structural parameters (tree height, crown diameter, and crown area), were used as predictors. The MSFS framework was applied to progressively reduce feature redundancy and identify an optimal subset, which was then used to train the XGBoost model. The results demonstrate that the MSFS–XGBoost model achieved the best performance, with a coefficient of determination (R2) of 0.901 and a root mean square error (RMSE) of 1.647 cm. Compared with models using the original feature set, R2 increased by 0.384 and RMSE decreased by 1.146 cm. These findings indicate that the proposed framework effectively improves DBH estimation accuracy and provides a reliable approach for individual-tree parameter estimation and large-scale forest resource monitoring using airborne LiDAR data.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2873: Individual-Tree DBH Estimation from Airborne LiDAR Data Using MSFS–XGBoost</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2873">doi: 10.3390/s26092873</a></p>
	<p>Authors:
		Pengfei Li
		Yue Jia
		</p>
	<p>Diameter at breast height (DBH) is a fundamental structural parameter for forest inventory and ecological analysis. However, field-based measurements (e.g., diameter tape surveys) are labor-intensive and inefficient for large-scale applications. Airborne light detection and ranging (LiDAR) provides an efficient alternative for individual-tree DBH estimation. Nevertheless, LiDAR-derived features, defined as statistical descriptors of point cloud structure and radiometric properties, are typically high-dimensional and redundant, which may degrade model performance. To address this issue, this study proposes an integrated framework combining Multi-Stage Feature Selection (MSFS) and Extreme Gradient Boosting (XGBoost) for DBH estimation. A total of 104 variables, including LiDAR-derived features (height, density, intensity, and canopy structure metrics) and structural parameters (tree height, crown diameter, and crown area), were used as predictors. The MSFS framework was applied to progressively reduce feature redundancy and identify an optimal subset, which was then used to train the XGBoost model. The results demonstrate that the MSFS–XGBoost model achieved the best performance, with a coefficient of determination (R2) of 0.901 and a root mean square error (RMSE) of 1.647 cm. Compared with models using the original feature set, R2 increased by 0.384 and RMSE decreased by 1.146 cm. These findings indicate that the proposed framework effectively improves DBH estimation accuracy and provides a reliable approach for individual-tree parameter estimation and large-scale forest resource monitoring using airborne LiDAR data.</p>
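	<p>A two-stage stand-in for the multi-stage selection idea: drop highly inter-correlated predictors, then rank the survivors by XGBoost importance. Thresholds and stage choices are illustrative, not the paper's exact pipeline.</p>
	<pre><code>
import numpy as np
from xgboost import XGBRegressor

rng = np.random.default_rng(0)
X = rng.random((300, 104))                              # 104 candidate variables
y = 2.0 * X[:, 0] + X[:, 1] + rng.normal(0, 0.1, 300)   # synthetic DBH target

# Stage 1: redundancy filter on pairwise correlation
corr = np.corrcoef(X, rowvar=False)
keep = []
for j in range(X.shape[1]):
    if all(abs(corr[j, k]) < 0.95 for k in keep):
        keep.append(j)

# Stage 2: importance ranking on the reduced set
model = XGBRegressor(n_estimators=200, max_depth=4).fit(X[:, keep], y)
top = np.argsort(model.feature_importances_)[::-1][:20]
selected = [keep[i] for i in top]
final = XGBRegressor(n_estimators=300).fit(X[:, selected], y)
</code></pre>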
	]]></content:encoded>

	<dc:title>Individual-Tree DBH Estimation from Airborne LiDAR Data Using MSFS–XGBoost</dc:title>
			<dc:creator>Pengfei Li</dc:creator>
			<dc:creator>Yue Jia</dc:creator>
		<dc:identifier>doi: 10.3390/s26092873</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2873</prism:startingPage>
		<prism:doi>10.3390/s26092873</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2873</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2874">

	<title>Sensors, Vol. 26, Pages 2874: USF-Net: Infrared-Visible Image Fusion via Unified Semantics and Context Modulation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2874</link>
	<description>Infrared–visible image fusion aims to integrate structural details, natural appearance, and thermal target information from two source modalities, thereby improving visual perception in complex scenes. However, under challenging conditions such as low illumination, noise, low contrast, and overexposure, existing methods often struggle to stably preserve cross-modal shared features (CMSF) while effectively highlighting single-modal specific features (SMSF). In addition, the absence of real fusion labels limits effective supervised learning. To address these issues, this paper proposes a unified semantic-guided fusion network, termed USF-Net, which jointly models the shared and specific features of infrared and visible images under a unified semantic representation and dynamically adjusts the fusion strategy according to imaging contexts. Specifically, the Shared Feature Alignment and Enhancement (SFAE) module is designed to strengthen consistent modeling of common features across modalities, while the Specific Feature Reweighting Fusion (SFRF) module selectively enhances modality-specific features to achieve stable and controllable fusion. Moreover, the constructed real fusion labels are incorporated into the loss function for collaborative training. Experimental results on multiple public datasets demonstrate that USF-Net achieves superior fusion performance under diverse complex imaging conditions.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2874: USF-Net: Infrared-Visible Image Fusion via Unified Semantics and Context Modulation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2874">doi: 10.3390/s26092874</a></p>
	<p>Authors:
		Dingding Fu
		Zhongguo Li
		Wenbin Fan
		Qi Wang
		</p>
	<p>Infrared–visible image fusion aims to integrate structural details, natural appearance, and thermal target information from two source modalities, thereby improving visual perception in complex scenes. However, under challenging conditions such as low illumination, noise, low contrast, and overexposure, existing methods often struggle to stably preserve cross-modal shared features (CMSF) while effectively highlighting single-modal specific features (SMSF). In addition, the absence of real fusion labels limits effective supervised learning. To address these issues, this paper proposes a unified semantic-guided fusion network, termed USF-Net, which jointly models the shared and specific features of infrared and visible images under a unified semantic representation and dynamically adjusts the fusion strategy according to imaging contexts. Specifically, the Shared Feature Alignment and Enhancement (SFAE) module is designed to strengthen consistent modeling of common features across modalities, while the Specific Feature Reweighting Fusion (SFRF) module selectively enhances modality-specific features to achieve stable and controllable fusion. Moreover, the constructed real fusion labels are incorporated into the loss function for collaborative training. Experimental results on multiple public datasets demonstrate that USF-Net achieves superior fusion performance under diverse complex imaging conditions.</p>
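	<p>The reweighting notion behind modality-specific fusion can be sketched as a generic gated blend of infrared and visible feature maps; this is an illustrative stand-in, not the published SFAE/SFRF modules.</p>
	<pre><code>
import torch
import torch.nn as nn

class GatedFusion(nn.Module):
    def __init__(self, c=32):
        super().__init__()
        # learn per-channel weights from pooled statistics of both modalities
        self.gate = nn.Sequential(nn.Linear(2 * c, c), nn.Sigmoid())

    def forward(self, f_ir, f_vis):               # both (B, C, H, W)
        stats = torch.cat([f_ir.mean((2, 3)), f_vis.mean((2, 3))], dim=1)
        w = self.gate(stats)[..., None, None]     # (B, C, 1, 1)
        return w * f_ir + (1 - w) * f_vis

fused = GatedFusion()(torch.rand(1, 32, 16, 16), torch.rand(1, 32, 16, 16))
</code></pre>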
	]]></content:encoded>

	<dc:title>USF-Net: Infrared-Visible Image Fusion via Unified Semantics and Context Modulation</dc:title>
			<dc:creator>Dingding Fu</dc:creator>
			<dc:creator>Zhongguo Li</dc:creator>
			<dc:creator>Wenbin Fan</dc:creator>
			<dc:creator>Qi Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092874</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2874</prism:startingPage>
		<prism:doi>10.3390/s26092874</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2874</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2872">

	<title>Sensors, Vol. 26, Pages 2872: A Fluorescence-Based Sensor Combined with Chemometric and Deep Learning Approaches for Detecting and Quantifying Coconut Milk Fraud in Bovine Milk</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2872</link>
	<description>Bovine milk adulteration with coconut milk poses a significant threat to food safety, as both liquids are visually similar yet nutritionally distinct. This study presents an integrated analytical framework combining excitation–emission matrix (EEM) fluorescence spectroscopy with chemometric and deep learning techniques to detect and quantify coconut milk adulteration in bovine milk across nine concentration levels (0–100% v/v). Parallel factor analysis (PARAFAC) resolved two dominant fluorescent components, tryptophan (λ ex/em: 290/350 nm) and riboflavin (λ ex/em: 450/525 nm), whose scores decreased monotonically with increasing adulteration, confirming their role as key chemical biomarkers. For quantitative prediction, PLSR and 1D-CNN models were developed using emission spectra at three excitation wavelengths, with best performances achieved at 450 nm (PLSR: R2P = 0.97, RMSEP = 5.00%; 1D-CNN: R2P = 0.94, RMSEP = 6.75%). A lightweight 2D-CNN utilizing full EEM contour maps as image inputs outperformed all quantitative models, achieving R2P = 0.99, RMSEP = 2.36%, and RPD = 12.97, demonstrating the advantage of preserving the full two-dimensional fluorescence topology over discrete wavelength selection. These results confirm that EEM combined with 2D-CNN provides a highly accurate and non-destructive tool for dairy authentication.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2872: A Fluorescence-Based Sensor Combined with Chemometric and Deep Learning Approaches for Detecting and Quantifying Coconut Milk Fraud in Bovine Milk</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2872">doi: 10.3390/s26092872</a></p>
	<p>Authors:
		Stella Maria Dyah Cahyarani
		Hoonsoo Lee
		</p>
	<p>Bovine milk adulteration with coconut milk poses a significant threat to food safety, as both liquids are visually similar yet nutritionally distinct. This study presents an integrated analytical framework combining excitation–emission matrix (EEM) fluorescence spectroscopy with chemometric and deep learning techniques to detect and quantify coconut milk adulteration in bovine milk across nine concentration levels (0–100% v/v). Parallel factor analysis (PARAFAC) resolved two dominant fluorescent components, tryptophan (λ ex/em: 290/350 nm) and riboflavin (λ ex/em: 450/525 nm), whose scores decreased monotonically with increasing adulteration, confirming their role as key chemical biomarkers. For quantitative prediction, PLSR and 1D-CNN models were developed using emission spectra at three excitation wavelengths, with best performances achieved at 450 nm (PLSR: R2P = 0.97, RMSEP = 5.00%; 1D-CNN: R2P = 0.94, RMSEP = 6.75%). A lightweight 2D-CNN utilizing full EEM contour maps as image inputs outperformed all quantitative models, achieving R2P = 0.99, RMSEP = 2.36%, and RPD = 12.97, demonstrating the advantage of preserving the full two-dimensional fluorescence topology over discrete wavelength selection. These results confirm that EEM combined with 2D-CNN provides a highly accurate and non-destructive tool for dairy authentication.</p>
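	<p>Treating an EEM contour map as a single-channel image, a lightweight 2D-CNN regressor can be sketched as follows; layer sizes are placeholders, not the paper's architecture.</p>
	<pre><code>
import torch
import torch.nn as nn

eem_cnn = nn.Sequential(
    nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(8, 16, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1),
    nn.Flatten(), nn.Linear(16, 1))      # predicted adulteration level

eem_batch = torch.rand(4, 1, 64, 64)     # excitation x emission grid
pred = eem_cnn(eem_batch)                # (4, 1) regression output
</code></pre>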
	]]></content:encoded>

	<dc:title>A Fluorescence-Based Sensor Combined with Chemometric and Deep Learning Approaches for Detecting and Quantifying Coconut Milk Fraud in Bovine Milk</dc:title>
			<dc:creator>Stella Maria Dyah Cahyarani</dc:creator>
			<dc:creator>Hoonsoo Lee</dc:creator>
		<dc:identifier>doi: 10.3390/s26092872</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2872</prism:startingPage>
		<prism:doi>10.3390/s26092872</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2872</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2871">

	<title>Sensors, Vol. 26, Pages 2871: Spatiotemporal Locality-Aware Adaptive Hybrid Optoelectronic Interconnect for Reconfigurable Array Processors</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2871</link>
	<description>As data-intensive applications continue to scale reconfigurable array processors (RAPs), electrical networks-on-chip (NoCs) are increasingly constrained by energy-delay bottlenecks due to RC-delay constraints. Hybrid optoelectronic NoCs (HONoCs) suffer from a fundamental medium-selection dilemma: optical circuit switching incurs microsecond-scale setup overheads for long flows, whereas static distance thresholds fail to capture the spatiotemporal heterogeneity of traffic, causing wavelength waste for bursty flows and congestion diffusion under non-stationary loads. This paper presents an adaptive switching framework that is aware of spatiotemporal locality. We introduce the Temporal-Spatial Locality Index (TSLI) to classify flows into Electrophilic (EF), Photophilic (PF), and Hybrid-sensitive (HF) categories, and propose Cross-layer Congestion Entropy (CCE) to unify electrical and optical resource states. Based on these metrics, an Adaptive Medium Selection State Machine (AMSSM) dynamically switches among Electro-Dominant (EDM), Electro-Optical Synergistic (EOSM), and Optical-Dominant (ODM) modes, while a Weighted Multi-dimensional Medium Matching (WMMM) algorithm performs fine-grained channel selection. A Predictive Optical Path Provisioning (POPP) mechanism further amortizes setup latencies via trend-aware pre-establishment. Evaluation on an 8 × 8 mesh HONoC demonstrates 22% higher saturation throughput, 38% lower energy-delay product (EDP), and 57% reduction in average latency under non-stationary traffic, compared to static thresholds. The proposed mechanisms provide a theoretical foundation and engineering paradigm for efficient on-chip interconnects.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2871: Spatiotemporal Locality-Aware Adaptive Hybrid Optoelectronic Interconnect for Reconfigurable Array Processors</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2871">doi: 10.3390/s26092871</a></p>
	<p>Authors:
		Bowen Yang
		Yong Li
		Rui Shan
		Junyong Deng
		Yu Feng
		</p>
	<p>As data-intensive applications continue to scale reconfigurable array processors (RAPs), electrical networks-on-chip (NoCs) are increasingly constrained by energy-delay bottlenecks due to RC-delay constraints. Hybrid optoelectronic NoCs (HONoCs) suffer from a fundamental medium-selection dilemma: optical circuit switching incurs microsecond-scale setup overheads for long flows, whereas static distance thresholds fail to capture the spatiotemporal heterogeneity of traffic, causing wavelength waste for bursty flows and congestion diffusion under non-stationary loads. This paper presents an adaptive switching framework that is aware of spatiotemporal locality. We introduce the Temporal-Spatial Locality Index (TSLI) to classify flows into Electrophilic (EF), Photophilic (PF), and Hybrid-sensitive (HF) categories, and propose Cross-layer Congestion Entropy (CCE) to unify electrical and optical resource states. Based on these metrics, an Adaptive Medium Selection State Machine (AMSSM) dynamically switches among Electro-Dominant (EDM), Electro-Optical Synergistic (EOSM), and Optical-Dominant (ODM) modes, while a Weighted Multi-dimensional Medium Matching (WMMM) algorithm performs fine-grained channel selection. A Predictive Optical Path Provisioning (POPP) mechanism further amortizes setup latencies via trend-aware pre-establishment. Evaluation on an 8 × 8 mesh HONoC demonstrates 22% higher saturation throughput, 38% lower energy-delay product (EDP), and 57% reduction in average latency under non-stationary traffic, compared to static thresholds. The proposed mechanisms provide a theoretical foundation and engineering paradigm for efficient on-chip interconnects.</p>
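	<p>A toy state machine in the spirit of adaptive medium selection, reducing TSLI and CCE to placeholder scalar scores with invented thresholds:</p>
	<pre><code>
from enum import Enum

class Mode(Enum):
    EDM = "electro-dominant"
    EOSM = "electro-optical synergistic"
    ODM = "optical-dominant"

def select_mode(tsli, cce, lo=0.3, hi=0.7):
    # short/bursty flows or congested optics: stay electrical
    if tsli < lo or cce > hi:
        return Mode.EDM
    # long-lived locality with lightly loaded optics: go optical
    if tsli > hi and cce < lo:
        return Mode.ODM
    return Mode.EOSM

print(select_mode(tsli=0.8, cce=0.1))   # Mode.ODM
</code></pre>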
	]]></content:encoded>

	<dc:title>Spatiotemporal Locality-Aware Adaptive Hybrid Optoelectronic Interconnect for Reconfigurable Array Processors</dc:title>
			<dc:creator>Bowen Yang</dc:creator>
			<dc:creator>Yong Li</dc:creator>
			<dc:creator>Rui Shan</dc:creator>
			<dc:creator>Junyong Deng</dc:creator>
			<dc:creator>Yu Feng</dc:creator>
		<dc:identifier>doi: 10.3390/s26092871</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2871</prism:startingPage>
		<prism:doi>10.3390/s26092871</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2871</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2869">

	<title>Sensors, Vol. 26, Pages 2869: Assessing the Diagnostic Performance of a Smart Bra Using Temperature and Bioimpedance for Breast Cancer Detection: A First-in-Human Study</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2869</link>
	<description>(1) Background: Breast cancer screening remains limited by mammography, particularly in younger women, in dense breast tissue, and in the detection of interval cancers. The PHI-BRA Smart Bra was developed as a wearable, non-invasive device combining thermography and bioimpedance for frequent breast monitoring. This first-in-human study aimed to assess the feasibility and in vivo diagnostic performance of the PHI-BRA system in discriminating between women with and without breast lesions. (2) Methods: A prospective feasibility study was conducted between March 2023 and February 2024. A calibration cohort (n = 15) was used to define the discrimination model, followed by an analysis cohort (n = 26; 13 with breast lesions and 13 without). Thermal and bioimpedance signals were acquired using the PHI-BRA device. Diagnostic performance was evaluated using receiver operating characteristic (ROC) analysis, with mammography as the reference standard. (3) Results: In the analysis cohort, the temperature-based model achieved an area under the ROC curve (AUC) of 80.8% (95% CI [63.2–98.3]). At the optimal threshold, sensitivity was 84.6% (95% CI [61.5–100]) and specificity was 76.9% (95% CI [53.8–100]). Exploratory bioimpedance analyses showed lower sensitivity but high specificity, mainly limited by sensor contact stability. No adverse events were reported. (4) Conclusions: This first-in-human study demonstrates an initial exploration of the feasibility and safety of a wearable thermography-based approach for breast lesion discrimination. The results support further clinical validation of a multimodal wearable system as a complementary tool to existing breast cancer screening strategies.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2869: Assessing the Diagnostic Performance of a Smart Bra Using Temperature and Bioimpedance for Breast Cancer Detection: A First-in-Human Study</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2869">doi: 10.3390/s26092869</a></p>
	<p>Authors:
		Anne-Sophie Belmont
		Marie-Valérie Moreno
		Eloise Aubret
		Justine Dubreuil
		Nathalie Piazon
		Julien Berthiller
		Maxime Bonjour
		Emmanuelle Dantony
		Audrey Haquin
		Marion Cortet
		</p>
	<p>(1) Background: Breast cancer screening remains limited by mammography, particularly in younger women, in dense breast tissue, and in the detection of interval cancers. The PHI-BRA Smart Bra was developed as a wearable, non-invasive device combining thermography and bioimpedance for frequent breast monitoring. This first-in-human study aimed to assess the feasibility and in vivo diagnostic performance of the PHI-BRA system in discriminating between women with and without breast lesions. (2) Methods: A prospective feasibility study was conducted between March 2023 and February 2024. A calibration cohort (n = 15) was used to define the discrimination model, followed by an analysis cohort (n = 26; 13 with breast lesions and 13 without). Thermal and bioimpedance signals were acquired using the PHI-BRA device. Diagnostic performance was evaluated using receiver operating characteristic (ROC) analysis, with mammography as the reference standard. (3) Results: In the analysis cohort, the temperature-based model achieved an area under the ROC curve (AUC) of 80.8% (95% CI [63.2–98.3]). At the optimal threshold, sensitivity was 84.6% (95% CI [61.5–100]) and specificity was 76.9% (95% CI [53.8–100]). Exploratory bioimpedance analyses showed lower sensitivity but high specificity, mainly limited by sensor contact stability. No adverse events were reported. (4) Conclusions: This first-in-human study demonstrates an initial exploration of the feasibility and safety of a wearable thermography-based approach for breast lesion discrimination. The results support further clinical validation of a multimodal wearable system as a complementary tool to existing breast cancer screening strategies.</p>
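	<p>The reported ROC workflow (AUC plus an operating threshold) can be reproduced in outline as below; the scores are synthetic, and the optimal-threshold criterion (Youden's J) is an assumption, since the paper's exact rule is not restated here.</p>
	<pre><code>
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score

rng = np.random.default_rng(7)
y_true = np.array([0] * 13 + [1] * 13)            # 13 controls, 13 lesions
scores = np.concatenate([rng.normal(0.0, 1.0, 13),
                         rng.normal(1.2, 1.0, 13)])

fpr, tpr, thresholds = roc_curve(y_true, scores)
best = thresholds[np.argmax(tpr - fpr)]            # Youden's J operating point
print(f"AUC={roc_auc_score(y_true, scores):.3f}, threshold={best:.3f}")
</code></pre>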
	]]></content:encoded>

	<dc:title>Assessing the Diagnostic Performance of a Smart Bra Using Temperature and Bioimpedance for Breast Cancer Detection: A First-in-Human Study</dc:title>
			<dc:creator>Anne-Sophie Belmont</dc:creator>
			<dc:creator>Marie-Valérie Moreno</dc:creator>
			<dc:creator>Eloise Aubret</dc:creator>
			<dc:creator>Justine Dubreuil</dc:creator>
			<dc:creator>Nathalie Piazon</dc:creator>
			<dc:creator>Julien Berthiller</dc:creator>
			<dc:creator>Maxime Bonjour</dc:creator>
			<dc:creator>Emmanuelle Dantony</dc:creator>
			<dc:creator>Audrey Haquin</dc:creator>
			<dc:creator>Marion Cortet</dc:creator>
		<dc:identifier>doi: 10.3390/s26092869</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2869</prism:startingPage>
		<prism:doi>10.3390/s26092869</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2869</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2870">

	<title>Sensors, Vol. 26, Pages 2870: LLMs in the Loop: A Survey of Language-Driven Driver Monitoring and Assistance Systems</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2870</link>
	<description>In recent years, large language models (LLMs) have demonstrated robust reasoning capabilities comparable to human performance. This makes them increasingly appealing for driver assistance, where adaptation to dynamic human context is essential. Yet, research in this area remains fragmented, often focusing on isolated applications and failing to utilize LLMs’ full potential to deliver integrated, context-specific support and action. This survey synthesizes recent advancements in LLM-driven occupant monitoring systems, focusing on their capabilities for interpreting driver states and acting appropriately, enabling a new generation of intelligent driver assistance. We critically examine pioneering frameworks, benchmarks, and foundational datasets that employ techniques like reasoning chains, multimodality, and human-in-the-loop feedback to create personalized and safe driving experiences. We lay out the current trends, limitations, and emerging patterns, in addition to a novel human-centered evaluation of the field, providing researchers with a roadmap towards transparent and trustworthy in-cabin systems that bridge safety with driver experience.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2870: LLMs in the Loop: A Survey of Language-Driven Driver Monitoring and Assistance Systems</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2870">doi: 10.3390/s26092870</a></p>
	<p>Authors:
		Vanchha Chandrayan
		Ignacio Alvarez
		</p>
	<p>In recent years, large language models (LLMs) have demonstrated robust reasoning capabilities comparable to human performance. This makes them increasingly appealing for driver assistance, where adaptation to dynamic human context is essential. Yet, research in this area remains fragmented, often focusing on isolated applications and failing to utilize LLMs’ full potential to deliver integrated, context-specific support and action. This survey synthesizes recent advancements in LLM-driven occupant monitoring systems, focusing on their capabilities for interpreting driver states and acting appropriately, enabling a new generation of intelligent driver assistance. We critically examine pioneering frameworks, benchmarks, and foundational datasets that employ techniques like reasoning chains, multimodality, and human-in-the-loop feedback to create personalized and safe driving experiences. We lay out the current trends, limitations, and emerging patterns, in addition to a novel human-centered evaluation of the field, providing researchers with a roadmap towards transparent and trustworthy in-cabin systems that bridge safety with driver experience.</p>
	]]></content:encoded>

	<dc:title>LLMs in the Loop: A Survey of Language-Driven Driver Monitoring and Assistance Systems</dc:title>
			<dc:creator>Vanchha Chandrayan</dc:creator>
			<dc:creator>Ignacio Alvarez</dc:creator>
		<dc:identifier>doi: 10.3390/s26092870</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2870</prism:startingPage>
		<prism:doi>10.3390/s26092870</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2870</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2867">

	<title>Sensors, Vol. 26, Pages 2867: Location Tracking of a Radio-Wave Antenna Utilizing the Radiation Pattern Recognized by Deep Network</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2867</link>
	<description>This paper introduces a radio frequency system to track the location of a stent designed to work inside a human artery. The stent is designed as a hemostasis aid for emergency situations, such as on the battlefield, where common surgical equipment like fluoroscopy systems is not available. In the application of interest, the stent must be guided to the correct location to achieve effective hemostasis and prevent complications. The locating approach uses the radiation pattern from the transmitter as the reference. When the transmitting frequency changes over a certain range, the measured amplitude at a receiver depends on its location relative to the transmitter. However, when the input frequency is unequal to the resonance frequency, the radiation pattern varies in an unpredictable way. To solve this problem, a deep learning model was trained to recognize variations in the radiation pattern and predict the receiver’s location as one of the classes in the reference grid. The deep learning model also reduces the impact of noise and disturbing signals, which effectively improves the system’s robustness.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2867: Location Tracking of a Radio-Wave Antenna Utilizing the Radiation Pattern Recognized by Deep Network</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2867">doi: 10.3390/s26092867</a></p>
	<p>Authors:
		Yifan Zhang
		William W. Clark
		Bryan Tillman
		Young Jae Chun
		Sung Kwon Cho
		</p>
	<p>This paper introduces a radio frequency system to track the location of a stent designed to work inside a human artery. The stent is designed as a hemostasis aid for emergency situations, such as on the battlefield, where common surgical equipment like fluoroscopy systems is not available. In the application of interest, the stent must be guided to the correct location to achieve effective hemostasis and prevent complications. The locating approach uses the radiation pattern from the transmitter as the reference. When the transmitting frequency changes over a certain range, the measured amplitude at a receiver depends on its location relative to the transmitter. However, when the input frequency is unequal to the resonance frequency, the radiation pattern varies in an unpredictable way. To solve this problem, a deep learning model was trained to recognize variations in the radiation pattern and predict the receiver’s location as one of the classes in the reference grid. The deep learning model also reduces the impact of noise and disturbing signals, which effectively improves the system’s robustness.</p>
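	<p>The classification framing can be sketched simply: an amplitude-versus-frequency sweep becomes a feature vector, and a small network predicts which cell of the reference grid the receiver occupies. Sweep length and grid size below are illustrative.</p>
	<pre><code>
import torch
import torch.nn as nn

n_freqs, n_cells = 64, 25              # sweep length, 5x5 reference grid
net = nn.Sequential(nn.Linear(n_freqs, 128), nn.ReLU(),
                    nn.Linear(128, n_cells))

sweep = torch.rand(1, n_freqs)         # measured amplitudes over the sweep
cell = net(sweep).argmax(dim=1)        # predicted grid location (class index)
</code></pre>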
	]]></content:encoded>

	<dc:title>Location Tracking of a Radio-Wave Antenna Utilizing the Radiation Pattern Recognized by Deep Network</dc:title>
			<dc:creator>Yifan Zhang</dc:creator>
			<dc:creator>William W. Clark</dc:creator>
			<dc:creator>Bryan Tillman</dc:creator>
			<dc:creator>Young Jae Chun</dc:creator>
			<dc:creator>Sung Kwon Cho</dc:creator>
		<dc:identifier>doi: 10.3390/s26092867</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2867</prism:startingPage>
		<prism:doi>10.3390/s26092867</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2867</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2868">

	<title>Sensors, Vol. 26, Pages 2868: Multi-Scale Spatiotemporal Graph Neural Network Using Brain Partitioning for Major Depressive Disorder Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2868</link>
	<description>Major depressive disorder (MDD) is a prevalent and severe mental disorder, and EEG-based automated detection has become a promising approach for auxiliary screening and diagnosis. In this work, we propose a novel multiscale spatiotemporal graph neural network for MDD detection from multichannel EEG signals. Specifically, a left–right hemispheric partitioning prior is used to encode brain functional organization. Based on this partitioning, adaptive graphs are then constructed and graph message passing is performed to model intra-hemispheric interactions. The approach not only incorporates brain functional organization into the learning process but also enhances the extraction of discriminative features related to depressive brain dynamics. The proposed method was validated in a cross-subject scenario on a private resting-state EEG dataset including 54 adult participants (27 MDD patients and 27 healthy controls; age range: 27–48 years). Experimental results on the dataset show an accuracy of 92.21%, surpassing the baseline models. Meanwhile, ablation experiments demonstrate the effectiveness of our proposed method.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2868: Multi-Scale Spatiotemporal Graph Neural Network Using Brain Partitioning for Major Depressive Disorder Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2868">doi: 10.3390/s26092868</a></p>
	<p>Authors:
		Zhao Geng
		Wei Guo
		Jiale Wang
		Yonghua Ma
		Yongbao Zhu
		</p>
	<p>Major depressive disorder (MDD) is a prevalent and severe mental disorder, and EEG-based automated detection has become a promising approach for auxiliary screening and diagnosis. In this work, we propose a novel multiscale spatiotemporal graph neural network for MDD detection from multichannel EEG signals. Specifically, a left–right hemispheric partitioning prior is used to encode brain functional organization. Based on this partitioning, adaptive graphs are then constructed and graph message passing is performed to model intra-hemispheric interactions. The approach not only incorporates brain functional organization into the learning process but also enhances the extraction of discriminative features related to depressive brain dynamics. The proposed method was validated in a cross-subject scenario on a private resting-state EEG dataset including 54 adult participants (27 MDD patients and 27 healthy controls; age range: 27–48 years). Experimental results on the dataset show an accuracy of 92.21%, surpassing the baseline models. Meanwhile, ablation experiments demonstrate the effectiveness of our proposed method.</p>
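	<p>One way to encode the hemispheric prior is a block mask on a learnable adjacency so that message passing stays intra-hemispheric; the channel split and sizes below are illustrative assumptions.</p>
	<pre><code>
import torch

n_ch = 16
left = torch.arange(0, n_ch // 2)                 # assumed left-hemisphere channels
right = torch.arange(n_ch // 2, n_ch)             # assumed right-hemisphere channels

mask = torch.zeros(n_ch, n_ch)
mask[left[:, None], left] = 1.0                   # left-left block
mask[right[:, None], right] = 1.0                 # right-right block

adj = torch.softmax(torch.randn(n_ch, n_ch), dim=1) * mask  # masked adaptive graph
x = torch.rand(n_ch, 8)                           # per-channel features
msg = adj @ x                                     # one round of message passing
</code></pre>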
	]]></content:encoded>

	<dc:title>Multi-Scale Spatiotemporal Graph Neural Network Using Brain Partitioning for Major Depressive Disorder Detection</dc:title>
			<dc:creator>Zhao Geng</dc:creator>
			<dc:creator>Wei Guo</dc:creator>
			<dc:creator>Jiale Wang</dc:creator>
			<dc:creator>Yonghua Ma</dc:creator>
			<dc:creator>Yongbao Zhu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092868</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2868</prism:startingPage>
		<prism:doi>10.3390/s26092868</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2868</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2866">

	<title>Sensors, Vol. 26, Pages 2866: An Optimized Clustering Routing Algorithm for Wireless Sensor Networks Based on Spotted Hyena and Improved Energy-Efficient Non-Uniform Clustering</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2866</link>
	<description>Wireless Sensor Networks (WSNs) are widely used in environmental monitoring, disaster early warning, and smart grids. However, sensor nodes face strict energy limitations. Unbalanced energy consumption and hotspots severely shorten the network lifetime. To address these problems, this paper proposes an optimized Spotted Hyena Optimization-Energy-Efficient Non-Uniform Clustering algorithm (SHOE) for cluster head selection and data transmission. The algorithm has three main innovations: combining a bio-inspired metaheuristic with an improved EEUC (Energy-Efficient Unequal Clustering) multi-hop relay and a Gaussian distribution model for non-uniform node deployment; designing a multi-dimensional fitness function considering energy, distance, and node location; and introducing empty cluster and isolated node repair mechanisms to balance exploration and exploitation. Specifically, the multi-dimensional fitness function guides the heuristic search process towards high-quality cluster head candidates, while the empty cluster and isolated node repair mechanisms dynamically rectify abnormal network structures, ensuring the robustness of the final architecture optimized by the bio-inspired framework. Simulations in MATLAB show that SHOE outperforms LEACH (Low-Energy Adaptive Clustering Hierarchy), PSOE (Particle Swarm Optimization with Evolutionary Strategy), PL-EBC (Probabilistic Localized Energy-Balanced Clustering), and CGWOA (Chaotic Grey Wolf Optimization Algorithm) in reducing node death, saving energy, and extending network lifetime. It improves adaptability to non-uniform distribution and optimizes energy balance, thus enhancing the efficiency and stability of WSNs.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2866: An Optimized Clustering Routing Algorithm for Wireless Sensor Networks Based on Spotted Hyena and Improved Energy-Efficient Non-Uniform Clustering</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2866">doi: 10.3390/s26092866</a></p>
	<p>Authors:
		Songhao Jia
		Shuya Jia
		Wenqian Shao
		Fangfang Li
		</p>
	<p>Wireless Sensor Networks (WSNs) are widely used in environmental monitoring, disaster early warning, and smart grids. However, sensor nodes face strict energy limitations. Unbalanced energy consumption and hotspots severely shorten the network lifetime. To address these problems, this paper proposes an optimized Spotted Hyena Optimization-Energy-Efficient Non-Uniform Clustering algorithm (SHOE) for cluster head selection and data transmission. The algorithm has three main innovations: combining a bio-inspired metaheuristic with an improved EEUC (Energy-Efficient Unequal Clustering) multi-hop relay and a Gaussian distribution model for non-uniform node deployment; designing a multi-dimensional fitness function considering energy, distance, and node location; and introducing empty cluster and isolated node repair mechanisms to balance exploration and exploitation. Specifically, the multi-dimensional fitness function guides the heuristic search process towards high-quality cluster head candidates, while the empty cluster and isolated node repair mechanisms dynamically rectify abnormal network structures, ensuring the robustness of the final architecture optimized by the bio-inspired framework. Simulations in MATLAB show that SHOE outperforms LEACH (Low-Energy Adaptive Clustering Hierarchy), PSOE (Particle Swarm Optimization with Evolutionary Strategy), PL-EBC (Probabilistic Localized Energy-Balanced Clustering), and CGWOA (Chaotic Grey Wolf Optimization Algorithm) in reducing node death, saving energy, and extending network lifetime. It improves adaptability to non-uniform distribution and optimizes energy balance, thus enhancing the efficiency and stability of WSNs.</p>
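	<p>A toy multi-dimensional fitness for cluster-head candidacy, combining residual energy, distance to the sink, and local density; the weights and terms are illustrative stand-ins for the paper's function.</p>
	<pre><code>
import numpy as np

def fitness(node, nodes, sink, w=(0.5, 0.3, 0.2)):
    d_sink = np.linalg.norm(node["pos"] - sink)            # distance term
    density = sum(np.linalg.norm(node["pos"] - n["pos"]) < 20 for n in nodes)
    e_term = node["energy"] / node["e0"]                   # residual-energy ratio
    return w[0] * e_term + w[1] / (1 + d_sink) + w[2] * density / len(nodes)

nodes = [{"pos": np.random.rand(2) * 100, "energy": 0.8, "e0": 1.0}
         for _ in range(50)]
scores = [fitness(n, nodes, sink=np.array([50.0, 50.0])) for n in nodes]
</code></pre>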
	]]></content:encoded>

	<dc:title>An Optimized Clustering Routing Algorithm for Wireless Sensor Networks Based on Spotted Hyena and Improved Energy-Efficient Non-Uniform Clustering</dc:title>
			<dc:creator>Songhao Jia</dc:creator>
			<dc:creator>Shuya Jia</dc:creator>
			<dc:creator>Wenqian Shao</dc:creator>
			<dc:creator>Fangfang Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092866</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2866</prism:startingPage>
		<prism:doi>10.3390/s26092866</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2866</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2865">

	<title>Sensors, Vol. 26, Pages 2865: A Lightweight Vision-Based Emotion Sensing Framework for Assistive Healthcare Robotics</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2865</link>
	<description>Facial expression recognition (FER) for assistive and telepresence robotics remains challenging under resource-constrained conditions because landmark normalization is often unstable, many datasets have limited variability, and full facial landmark sets introduce redundancy. This paper proposes a lightweight, privacy-preserving FER framework for assistive healthcare robotics based on geometric facial landmarks rather than raw RGB images. The objective is to improve recognition robustness and deployment suitability on low-power edge devices through two complementary contributions: a revised nose-centered landmark normalization method and an optimized Facial Feature Mapping, FFM-L03. The proposed normalization replaces the expression-sensitive upper-lip reference with a geometrically stable nose-center anchor, while FFM-L03 combines FACS-guided anatomical priors with ANOVA F-score, LASSO, PCA, and t-SNE/UMAP to retain 60 informative landmarks. In addition, a heterogeneous Freepik dataset was constructed to increase variability in lighting, background, resolution, and subject appearance. Experimental evaluation across 15 landmark groups, four datasets, and four classifiers shows that the proposed method consistently improves performance over prior landmark configurations, achieving gains of up to 22.4 percentage points over the Ciraolo baseline and 22.1 percentage points over the full-landmark baseline in accuracy, precision, recall, and F1-score, while maintaining lightweight operation. These results demonstrate that principled normalization and targeted landmark selection can substantially improve FER for real-time, privacy-aware assistive robotic systems.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2865: A Lightweight Vision-Based Emotion Sensing Framework for Assistive Healthcare Robotics</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2865">doi: 10.3390/s26092865</a></p>
	<p>Authors:
		Hosam Zolfonoon
		Helder Jesus Araújo
		Lino Marques
		</p>
	<p>Facial expression recognition (FER) for assistive and telepresence robotics remains challenging under resource-constrained conditions because landmark normalization is often unstable, many datasets have limited variability, and full facial landmark sets introduce redundancy. This paper proposes a lightweight, privacy-preserving FER framework for assistive healthcare robotics based on geometric facial landmarks rather than raw RGB images. The objective is to improve recognition robustness and deployment suitability on low-power edge devices through two complementary contributions: a revised nose-centered landmark normalization method and an optimized Facial Feature Mapping, FFM-L03. The proposed normalization replaces the expression-sensitive upper-lip reference with a geometrically stable nose-center anchor, while FFM-L03 combines FACS-guided anatomical priors with ANOVA F-score, LASSO, PCA, and t-SNE/UMAP to retain 60 informative landmarks. In addition, a heterogeneous Freepik dataset was constructed to increase variability in lighting, background, resolution, and subject appearance. Experimental evaluation across 15 landmark groups, four datasets, and four classifiers shows that the proposed method consistently improves performance over prior landmark configurations, achieving gains of up to 22.4 percentage points over the Ciraolo baseline and 22.1 percentage points over the full-landmark baseline in accuracy, precision, recall, and F1-score, while maintaining lightweight operation. These results demonstrate that principled normalization and targeted landmark selection can substantially improve FER for real-time, privacy-aware assistive robotic systems.</p>
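	<p>A minimal sketch of the nose-centered landmark normalization described above, assuming landmarks arrive as an (N, 2) array and the nose-center index is known; the anchor index and the mean-distance scale are assumptions:</p>
	<pre><code>
import numpy as np

# Illustrative nose-centered normalization: translate landmarks so the
# nose-center anchor is the origin, then scale by the mean distance to it.
# The anchor index and scale choice are assumptions, not the paper's spec.
def normalize_landmarks(landmarks: np.ndarray, nose_idx: int) -> np.ndarray:
    anchor = landmarks[nose_idx]
    centered = landmarks - anchor
    scale = np.linalg.norm(centered, axis=1).mean()
    return centered / scale

pts = np.random.rand(68, 2)  # e.g., a 68-point landmark set
print(normalize_landmarks(pts, nose_idx=30).shape)
</code></pre>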
	]]></content:encoded>

	<dc:title>A Lightweight Vision-Based Emotion Sensing Framework for Assistive Healthcare Robotics</dc:title>
			<dc:creator>Hosam Zolfonoon</dc:creator>
			<dc:creator>Helder Jesus Araújo</dc:creator>
			<dc:creator>Lino Marques</dc:creator>
		<dc:identifier>doi: 10.3390/s26092865</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2865</prism:startingPage>
		<prism:doi>10.3390/s26092865</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2865</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2864">

	<title>Sensors, Vol. 26, Pages 2864: A Privacy-Preserving Artificial Intelligence-Driven Sensing System for Distributed Multimodal Risk Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2864</link>
	<description>With the widespread deployment of intelligent terminals, mobile payment platforms, and Internet of Things devices, security systems are being progressively transformed from traditional transaction outcome analysis toward an intelligent perception paradigm centered on user behavior, device states, and environmental context. To address the challenges of multimodal data heterogeneity, non-independent and identically distributed data across nodes, and the difficulty of centralized modeling under privacy constraints in distributed scenarios, an artificial intelligence-driven federated multimodal security perception framework, namely FMS-LLM, is proposed. At its core, the framework introduces a Non-IID adaptive federated fusion mechanism that achieves dual-level alignment&amp;amp;mdash;structural alignment via parameter-level masks and semantic alignment via feature consistency constraints&amp;amp;mdash;to effectively mitigate cross-node distribution discrepancies. Additionally, an LLM-driven semantic enhancement module is developed, utilizing trend-guided token selection and inertia-suppression to map low-level sensing features into high-level risk semantic representations, thereby supporting logical reasoning and explainable decision-making. This framework takes user behavioral sensing data, device state information, environmental context data, and transaction behavior data as inputs, and constructs an integrated security analysis pipeline of &amp;amp;ldquo;perception&amp;amp;ndash;collaboration&amp;amp;ndash;reasoning&amp;amp;rdquo;. Experimental results on the distributed multimodal security perception task demonstrate that the proposed method achieves an Accuracy of 91.62%, a Precision of 91.04%, a Recall of 90.37%, an F1-score of 90.70%, and a ROC-AUC of 94.73%, consistently outperforming baseline methods including Logistic Regression, Random Forest, LSTM, the centralized multimodal deep model, FedAvg, FedProx, and MOON. Under strongly Non-IID conditions, when &amp;amp;alpha;=0.1, the model still maintains an Accuracy of 88.47% and an F1-score of 87.11%, demonstrating stronger cross-node robustness. The ablation study further indicates that the complete model attains the best classification performance while reducing communication cost to 18.92 MB/Round. These results demonstrate that the proposed method can effectively fuse multi-source sensing information under privacy-preserving conditions and support intelligent security perception tasks with higher accuracy, stronger robustness, and improved interpretability.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2864: A Privacy-Preserving Artificial Intelligence-Driven Sensing System for Distributed Multimodal Risk Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2864">doi: 10.3390/s26092864</a></p>
	<p>Authors:
		Yawen Zhu
		Yiwei Song
		Yikun Xuan
		Yujing Song
		Jiahong Pu
		Jiehua Li
		Manzhou Li
		</p>
	<p>With the widespread deployment of intelligent terminals, mobile payment platforms, and Internet of Things devices, security systems are being progressively transformed from traditional transaction outcome analysis toward an intelligent perception paradigm centered on user behavior, device states, and environmental context. To address the challenges of multimodal data heterogeneity, non-independent and identically distributed data across nodes, and the difficulty of centralized modeling under privacy constraints in distributed scenarios, an artificial intelligence-driven federated multimodal security perception framework, namely FMS-LLM, is proposed. At its core, the framework introduces a Non-IID adaptive federated fusion mechanism that achieves dual-level alignment&amp;amp;mdash;structural alignment via parameter-level masks and semantic alignment via feature consistency constraints&amp;amp;mdash;to effectively mitigate cross-node distribution discrepancies. Additionally, an LLM-driven semantic enhancement module is developed, utilizing trend-guided token selection and inertia-suppression to map low-level sensing features into high-level risk semantic representations, thereby supporting logical reasoning and explainable decision-making. This framework takes user behavioral sensing data, device state information, environmental context data, and transaction behavior data as inputs, and constructs an integrated security analysis pipeline of &amp;amp;ldquo;perception&amp;amp;ndash;collaboration&amp;amp;ndash;reasoning&amp;amp;rdquo;. Experimental results on the distributed multimodal security perception task demonstrate that the proposed method achieves an Accuracy of 91.62%, a Precision of 91.04%, a Recall of 90.37%, an F1-score of 90.70%, and a ROC-AUC of 94.73%, consistently outperforming baseline methods including Logistic Regression, Random Forest, LSTM, the centralized multimodal deep model, FedAvg, FedProx, and MOON. Under strongly Non-IID conditions, when &amp;amp;alpha;=0.1, the model still maintains an Accuracy of 88.47% and an F1-score of 87.11%, demonstrating stronger cross-node robustness. The ablation study further indicates that the complete model attains the best classification performance while reducing communication cost to 18.92 MB/Round. These results demonstrate that the proposed method can effectively fuse multi-source sensing information under privacy-preserving conditions and support intelligent security perception tasks with higher accuracy, stronger robustness, and improved interpretability.</p>
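	<p>A minimal sketch of mapping modality-specific Shannon entropy to fusion weights through an exponential decay function, as described above; the decay constant and the normalization are assumptions:</p>
	<pre><code>
import numpy as np

def shannon_entropy(p: np.ndarray) -> float:
    """Entropy of a discrete probability vector."""
    p = np.clip(p, 1e-12, 1.0)
    return float(-(p * np.log(p)).sum())

def modality_weights(entropies, decay=1.0):
    """Map per-modality entropies to fusion weights via exponential decay.

    Lower entropy (a more confident modality) receives a higher weight.
    The decay constant and normalization are illustrative assumptions.
    """
    raw = np.exp(-decay * np.asarray(entropies))
    return raw / raw.sum()

h_behavior = shannon_entropy(np.array([0.7, 0.2, 0.1]))
h_device = shannon_entropy(np.array([0.4, 0.35, 0.25]))
print(modality_weights([h_behavior, h_device]))
</code></pre>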
	]]></content:encoded>

	<dc:title>A Privacy-Preserving Artificial Intelligence-Driven Sensing System for Distributed Multimodal Risk Detection</dc:title>
			<dc:creator>Yawen Zhu</dc:creator>
			<dc:creator>Yiwei Song</dc:creator>
			<dc:creator>Yikun Xuan</dc:creator>
			<dc:creator>Yujing Song</dc:creator>
			<dc:creator>Jiahong Pu</dc:creator>
			<dc:creator>Jiehua Li</dc:creator>
			<dc:creator>Manzhou Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092864</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2864</prism:startingPage>
		<prism:doi>10.3390/s26092864</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2864</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2863">

	<title>Sensors, Vol. 26, Pages 2863: Smart Exhaust Analytics: A Sensor-Based Way to Identify the Types of Engines Based on the Composition of Exhaust Gas</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2863</link>
	<description>The chemical composition of a vehicle&amp;amp;rsquo;s exhaust can be used to identify the engine&amp;amp;rsquo;s design and to verify compliance with environmental regulations through the vehicle&amp;amp;rsquo;s emissions. This paper describes a method to identify vehicle types using machine learning (ML), where low-cost MQ-series sensors measure the gas and particle emissions from a vehicle exhaust system while simultaneously measuring the vehicle&amp;amp;rsquo;s temperature and humidity levels. A custom-designed multi-sensor exhaust sensing module is employed to capture real-time exhaust emissions before they enter the atmosphere. Exhaust samples are collected from vehicles representing three major engine categories: petrol, diesel, and compressed natural gas (CNG). In addition, fresh air samples are collected as a baseline environmental reference for comparison. All exhaust measurements are collected under controlled and consistent engine operating conditions to ensure comparable emission profiling across vehicle classes. To ensure consistent combustion-based emission profiling, this study focuses on conventional fuel-powered vehicles. MQ-series gas sensors are sensitive to combustion by-products emitted during engine operation, such as carbon monoxide (CO) and hydrocarbons (HC), while also exhibiting cross-sensitivity to other gaseous components present in exhaust mixtures. Nevertheless, the proposed system performs pattern-based classification using relative sensor response signatures. Standardization of the data is achieved through z-score normalization. The best models developed (based on three separate experimental designs) are trained and validated using six supervised machine learning algorithms, namely Logistic Regression, Support Vector Machine (RBF), k-Nearest Neighbors, Random Forest, Gradient Boosting Decision Tree, and XGBoost, and are compared against one another. Evaluation with multiple metrics demonstrated that ensemble models outperformed all other algorithms, achieving the highest accuracy of 99.5%. Furthermore, noise analysis confirms that the proposed solution maintains high classification accuracy (more than 89%) even under substantial sensor perturbations mimicking real-world deployment. The proposed solution illustrates how gas sensors and advanced algorithms can provide accurate exhaust identification and identify engines in real time.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2863: Smart Exhaust Analytics: A Sensor-Based Way to Identify the Types of Engines Based on the Composition of Exhaust Gas</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2863">doi: 10.3390/s26092863</a></p>
	<p>Authors:
		Dharmendra Kumar
		Vibha Jain
		Ashutosh Mishra
		Rakesh Shrestha
		Navin Singh Rajput
		</p>
	<p>The chemical composition of a vehicle&amp;amp;rsquo;s exhaust can be used to identify the engine&amp;amp;rsquo;s design and to verify compliance with environmental regulations through the vehicle&amp;amp;rsquo;s emissions. This paper describes a method to identify vehicle types using machine learning (ML), where low-cost MQ-series sensors measure the gas and particle emissions from a vehicle exhaust system while simultaneously measuring the vehicle&amp;amp;rsquo;s temperature and humidity levels. A custom-designed multi-sensor exhaust sensing module is employed to capture real-time exhaust emissions before they enter the atmosphere. Exhaust samples are collected from vehicles representing three major engine categories: petrol, diesel, and compressed natural gas (CNG). In addition, fresh air samples are collected as a baseline environmental reference for comparison. All exhaust measurements are collected under controlled and consistent engine operating conditions to ensure comparable emission profiling across vehicle classes. To ensure consistent combustion-based emission profiling, this study focuses on conventional fuel-powered vehicles. MQ-series gas sensors are sensitive to combustion by-products emitted during engine operation, such as carbon monoxide (CO) and hydrocarbons (HC), while also exhibiting cross-sensitivity to other gaseous components present in exhaust mixtures. Nevertheless, the proposed system performs pattern-based classification using relative sensor response signatures. Standardization of the data is achieved through z-score normalization. The best models developed (based on three separate experimental designs) are trained and validated using six supervised machine learning algorithms, namely Logistic Regression, Support Vector Machine (RBF), k-Nearest Neighbors, Random Forest, Gradient Boosting Decision Tree, and XGBoost, and are compared against one another. Evaluation with multiple metrics demonstrated that ensemble models outperformed all other algorithms, achieving the highest accuracy of 99.5%. Furthermore, noise analysis confirms that the proposed solution maintains high classification accuracy (more than 89%) even under substantial sensor perturbations mimicking real-world deployment. The proposed solution illustrates how gas sensors and advanced algorithms can provide accurate exhaust identification and identify engines in real time.</p>
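	<p>A minimal sketch of the z-score normalization and classifier comparison described above, using scikit-learn with placeholder data in place of the MQ-sensor readings:</p>
	<pre><code>
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

# Placeholder data: rows of MQ-sensor responses plus temperature/humidity,
# labels in {petrol, diesel, cng, fresh_air}. Real features come from the
# custom exhaust sensing module described in the paper.
X = np.random.rand(200, 8)
y = np.random.choice(["petrol", "diesel", "cng", "fresh_air"], size=200)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
scaler = StandardScaler()  # z-score normalization
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Two of the six compared algorithms, as a sketch of the comparison loop.
for model in (LogisticRegression(max_iter=1000), RandomForestClassifier()):
    model.fit(X_train, y_train)
    print(type(model).__name__, model.score(X_test, y_test))
</code></pre>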
	]]></content:encoded>

	<dc:title>Smart Exhaust Analytics: A Sensor-Based Way to Identify the Types of Engines Based on the Composition of Exhaust Gas</dc:title>
			<dc:creator>Dharmendra Kumar</dc:creator>
			<dc:creator>Vibha Jain</dc:creator>
			<dc:creator>Ashutosh Mishra</dc:creator>
			<dc:creator>Rakesh Shrestha</dc:creator>
			<dc:creator>Navin Singh Rajput</dc:creator>
		<dc:identifier>doi: 10.3390/s26092863</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2863</prism:startingPage>
		<prism:doi>10.3390/s26092863</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2863</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2862">

	<title>Sensors, Vol. 26, Pages 2862: An Ultralight Launch-and-Recovery System for Tethered Micro Unmanned Aerial Vehicles on Small Unmanned Ground Vehicles</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2862</link>
	<description>Heterogeneous unmanned ground vehicle-unmanned aerial vehicle (UGV-UAV) collaborative systems offer clear advantages for field exploration. However, when tethered unmanned aerial vehicles (TUAVs) are introduced to extend mission capability, a major compatibility gap emerges for small and highly maneuverable UGVs: existing industrial tethered ground stations are generally too heavy and bulky to be carried by such platforms. In addition, on unstructured ground, residual station tilt can significantly complicate UAV launch and recovery. To address these issues, this paper develops an ultralight vehicle-mounted tethered ground station for micro unmanned aerial vehicles (micro-UAVs) that can be integrated directly with small UGVs. Through co-design of a 2-degree-of-freedom (2-DOF) self-leveling launch platform and a passive tether-assisted recovery scheme without visual fiducials, in which a customized UAV flight-control loop is coordinated with the state transitions of the ground tether-management system, the proposed system achieves practical tether-assisted recovery. Experiments show that the complete platform weighs only 4.1 kg and that the self-leveling mechanism compensates for ground inclinations over a total range of 24 degrees. Repeated passive-landing tests further demonstrate the feasibility of the proposed recovery scheme and its tolerance to moderate bay tilt and terminal off-axis activation. System-level flight validation confirms practical tether-assisted recovery without visual fiducials. In addition, we conduct a simplified exploratory simulation of tether-based ground-anchor localization under the proposed system architecture. Overall, these results establish a lightweight and low-cost hardware design and a practically viable recovery strategy for multimodal micro air-ground robotic systems.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2862: An Ultralight Launch-and-Recovery System for Tethered Micro Unmanned Aerial Vehicles on Small Unmanned Ground Vehicles</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2862">doi: 10.3390/s26092862</a></p>
	<p>Authors:
		Yiding Liu
		Zhuoqun Shen
		Jingjing Xu
		Sihao Chen
		Bingao Zhang
		Shengyong Xu
		</p>
	<p>Heterogeneous unmanned ground vehicle-unmanned aerial vehicle (UGV-UAV) collaborative systems offer clear advantages for field exploration. However, when tethered unmanned aerial vehicles (TUAVs) are introduced to extend mission capability, a major compatibility gap emerges for small and highly maneuverable UGVs: existing industrial tethered ground stations are generally too heavy and bulky to be carried by such platforms. In addition, on unstructured ground, residual station tilt can significantly complicate UAV launch and recovery. To address these issues, this paper develops an ultralight vehicle-mounted tethered ground station for micro unmanned aerial vehicles (micro-UAVs) that can be integrated directly with small UGVs. Through co-design of a 2-degree-of-freedom (2-DOF) self-leveling launch platform and a passive tether-assisted recovery scheme without visual fiducials, in which a customized UAV flight-control loop is coordinated with the state transitions of the ground tether-management system, the proposed system achieves practical tether-assisted recovery. Experiments show that the complete platform weighs only 4.1 kg and that the self-leveling mechanism compensates for ground inclinations over a total range of 24 degrees. Repeated passive-landing tests further demonstrate the feasibility of the proposed recovery scheme and its tolerance to moderate bay tilt and terminal off-axis activation. System-level flight validation confirms practical tether-assisted recovery without visual fiducials. In addition, we conduct a simplified exploratory simulation of tether-based ground-anchor localization under the proposed system architecture. Overall, these results establish a lightweight and low-cost hardware design and a practically viable recovery strategy for multimodal micro air-ground robotic systems.</p>
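	<p>A minimal sketch of 2-DOF self-leveling, assuming the platform commands two actuator angles that cancel the measured roll and pitch, clamped to a symmetric range consistent with the 24-degree total compensation reported above; the control law itself is an assumption:</p>
	<pre><code>
# Illustrative 2-DOF self-leveling: command actuator angles that cancel
# the measured platform tilt, clamped to the mechanism's range.
# A real controller would filter IMU data and rate-limit the actuators.
def level_commands(roll_deg: float, pitch_deg: float, limit_deg: float = 12.0):
    clamp = lambda a: max(-limit_deg, min(limit_deg, a))
    return clamp(-roll_deg), clamp(-pitch_deg)

print(level_commands(roll_deg=7.5, pitch_deg=-3.2))   # within range
print(level_commands(roll_deg=20.0, pitch_deg=0.0))   # saturates at the limit
</code></pre>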
	]]></content:encoded>

	<dc:title>An Ultralight Launch-and-Recovery System for Tethered Micro Unmanned Aerial Vehicles on Small Unmanned Ground Vehicles</dc:title>
			<dc:creator>Yiding Liu</dc:creator>
			<dc:creator>Zhuoqun Shen</dc:creator>
			<dc:creator>Jingjing Xu</dc:creator>
			<dc:creator>Sihao Chen</dc:creator>
			<dc:creator>Bingao Zhang</dc:creator>
			<dc:creator>Shengyong Xu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092862</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2862</prism:startingPage>
		<prism:doi>10.3390/s26092862</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2862</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2861">

	<title>Sensors, Vol. 26, Pages 2861: Real-Time Lightweight Weld Seam Keypoint Detection and Tracking via an Improved SimCC with a Unified Three-Keypoint Formulation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2861</link>
	<description>Reliable weld seam perception remains challenging in industrial environments, where arc light, spatter, smoke, and varying seam geometries can seriously degrade visual sensing. These disturbances make it difficult to achieve a unified representation, accurate localization, and real-time inference at the same time. To address this problem, this paper presents an end-to-end lightweight framework for weld seam keypoint detection and tracking based on an improved SimCC. A unified three-keypoint formulation is introduced to represent different weld geometries by using one seam center point and two orientation reference points, thereby supporting a perception-to-control mapping in which position control and orientation control are decoupled. In addition, a lightweight C3k2-based backbone is designed, and a non-parametric log-domain quadratic peak-refinement decoder is proposed to alleviate the discretization-induced quantization error of SimCC classification distributions without adding model parameters. Experiments show that the proposed model contains only 1.4 M parameters, achieves 17.01 ms CPU inference latency, and obtains a detection accuracy of 1.89 px MAE. In curved weld seam tracking experiments with the integrated robotic system, it further achieves an average trajectory tracking error as low as 0.159 mm and an average orientation error of 3.738&amp;amp;deg;, demonstrating its real-time accuracy and robustness for industrial welding applications.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2861: Real-Time Lightweight Weld Seam Keypoint Detection and Tracking via an Improved SimCC with a Unified Three-Keypoint Formulation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2861">doi: 10.3390/s26092861</a></p>
	<p>Authors:
		Shenkuo Wang
		Xiangjie Huang
		Ang Gao
		Chao Chen
		Fuxin Du
		</p>
	<p>Reliable weld seam perception remains challenging in industrial environments, where arc light, spatter, smoke, and varying seam geometries can seriously degrade visual sensing. These disturbances make it difficult to achieve a unified representation, accurate localization, and real-time inference at the same time. To address this problem, this paper presents an end-to-end lightweight framework for weld seam keypoint detection and tracking based on an improved SimCC. A unified three-keypoint formulation is introduced to represent different weld geometries by using one seam center point and two orientation reference points, thereby supporting a perception-to-control mapping in which position control and orientation control are decoupled. In addition, a lightweight C3k2-based backbone is designed, and a non-parametric log-domain quadratic peak-refinement decoder is proposed to alleviate the discretization-induced quantization error of SimCC classification distributions without adding model parameters. Experiments show that the proposed model contains only 1.4 M parameters, achieves 17.01 ms CPU inference latency, and obtains a detection accuracy of 1.89 px MAE. In curved weld seam tracking experiments with the integrated robotic system, it further achieves an average trajectory tracking error as low as 0.159 mm and an average orientation error of 3.738&amp;amp;deg;, demonstrating its real-time accuracy and robustness for industrial welding applications.</p>
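	<p>Log-domain quadratic peak refinement is a standard sub-bin (parabolic) interpolation; the sketch below applies it to a 1-D SimCC-style classification distribution (treating each axis independently is an assumption about the paper's exact decoder):</p>
	<pre><code>
import numpy as np

def refine_peak(probs: np.ndarray) -> float:
    """Sub-bin peak location via a quadratic fit in the log domain.

    Fits a parabola through log p at the argmax and its two neighbors;
    the vertex offset gives a fractional correction to the integer peak.
    """
    i = int(np.argmax(probs))
    if i == 0 or i == len(probs) - 1:
        return float(i)  # no neighbor on one side; skip refinement
    l, c, r = np.log(probs[i - 1: i + 2] + 1e-12)
    denom = l - 2.0 * c + r
    offset = 0.0 if denom == 0 else 0.5 * (l - r) / denom
    return i + offset

dist = np.array([0.01, 0.05, 0.60, 0.30, 0.04])
print(refine_peak(dist))  # fractional coordinate near bin 2
</code></pre>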
	]]></content:encoded>

	<dc:title>Real-Time Lightweight Weld Seam Keypoint Detection and Tracking via an Improved SimCC with a Unified Three-Keypoint Formulation</dc:title>
			<dc:creator>Shenkuo Wang</dc:creator>
			<dc:creator>Xiangjie Huang</dc:creator>
			<dc:creator>Ang Gao</dc:creator>
			<dc:creator>Chao Chen</dc:creator>
			<dc:creator>Fuxin Du</dc:creator>
		<dc:identifier>doi: 10.3390/s26092861</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2861</prism:startingPage>
		<prism:doi>10.3390/s26092861</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2861</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2860">

	<title>Sensors, Vol. 26, Pages 2860: A Complete Grocery Pick-and-Pack Application Using a Computationally Lightweight Vision-Based Mobile Manipulator</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2860</link>
	<description>Mobile manipulators have become essential platforms for autonomous tasks that demand high-quality performance and efficient operational processes. This paper presents a complete grocery pick-and-pack system for a mobile manipulator, integrating a graphical user interface (GUI) with an end-to-end vision-based grasp detection pipeline designed for lightweight computation. The system is evaluated on the Grocery Pick-and-Pack Benchmark (Level-3), the most challenging level due to deformable objects, dimensional constraints, and strict grasp-point requirements. Experimental results demonstrate an average success rate of 92% across five item classes, with the deformable sweet bag being the most challenging at 60%, and an average execution time of 7.5 s on an edge device. The system achieves strong computational efficiency, reflected by a compute-to-speed ratio (CSR) of 0.008, with a total model size of only 30.9 MB. Performance is further validated across multiple hardware platforms and under real competition scenarios in the European Robotics League 2025. The findings highlight the practical impact of lightweight, vision-based mobile manipulation and provide insights into current challenges and future research directions for autonomous robotic applications.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2860: A Complete Grocery Pick-and-Pack Application Using a Computationally Lightweight Vision-Based Mobile Manipulator</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2860">doi: 10.3390/s26092860</a></p>
	<p>Authors:
		Thanavin Mansakul
		Gilbert Tang
		Phil Webb
		Jamie Rice
		Daniel Oakley
		James Fowler
		</p>
	<p>Mobile manipulators have become essential platforms for autonomous tasks that demand high-quality performance and efficient operational processes. This paper presents a complete grocery pick-and-pack system for a mobile manipulator, integrating a graphical user interface (GUI) with an end-to-end vision-based grasp detection pipeline designed for lightweight computation. The system is evaluated on the Grocery Pick-and-Pack Benchmark (Level-3), the most challenging level due to deformable objects, dimensional constraints, and strict grasp-point requirements. Experimental results demonstrate an average success rate of 92% across five item classes, with the deformable sweet bag being the most challenging at 60%, and an average execution time of 7.5 s on an edge device. The system achieves strong computational efficiency, reflected by a compute-to-speed ratio (CSR) of 0.008, with a total model size of only 30.9 MB. Performance is further validated across multiple hardware platforms and under real competition scenarios in the European Robotics League 2025. The findings highlight the practical impact of lightweight, vision-based mobile manipulation and provide insights into current challenges and future research directions for autonomous robotic applications.</p>
	]]></content:encoded>

	<dc:title>A Complete Grocery Pick-and-Pack Application Using a Computationally Lightweight Vision-Based Mobile Manipulator</dc:title>
			<dc:creator>Thanavin Mansakul</dc:creator>
			<dc:creator>Gilbert Tang</dc:creator>
			<dc:creator>Phil Webb</dc:creator>
			<dc:creator>Jamie Rice</dc:creator>
			<dc:creator>Daniel Oakley</dc:creator>
			<dc:creator>James Fowler</dc:creator>
		<dc:identifier>doi: 10.3390/s26092860</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2860</prism:startingPage>
		<prism:doi>10.3390/s26092860</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2860</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2859">

	<title>Sensors, Vol. 26, Pages 2859: UDC-SNN: An Uncertainty-Aware Dynamic Cascading Framework with Spiking Neural Network for Balancing Performance and Energy in Multimodal Emotion Recognition</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2859</link>
	<description>The aim of this study is to propose an uncertainty-aware dynamic cascading framework based on a spiking neural network (UDC-SNN) for multimodal emotion recognition, particularly to address the inherent trade-off between recognition performance and energy efficiency. An asymmetric dynamic routing mechanism was proposed to enable demand-driven activation of the high-power electroencephalogram (EEG) branch, coupled with preliminary inference on a low-power electrocardiogram (ECG) branch and uncertainty quantification via Shannon entropy. Meanwhile, a parameter-free log-linear aggregation strategy was developed to transform modality-specific entropy into dynamic Bayesian weights through an exponential decay function, effectively mitigating the negative transfer effects induced by unimodal noise. The UDC-SNN was evaluated on the multimodal affective dataset DREAMER, comprising 23 subjects (170,660 segments). The averaged recognition accuracy and energy consumption across the three dimensions of valence, arousal, and dominance were 90.75% and 4.62 &amp;amp;mu;J, respectively. The obtained results suggest that the proposed framework could achieve a favorable balance between high emotion recognition accuracy and low energy consumption, thereby establishing its applicability for real-time monitoring in resource-constrained scenarios.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2859: UDC-SNN: An Uncertainty-Aware Dynamic Cascading Framework with Spiking Neural Network for Balancing Performance and Energy in Multimodal Emotion Recognition</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2859">doi: 10.3390/s26092859</a></p>
	<p>Authors:
		Guihao Ran
		Shengzhe Li
		Zhiwen Jiang
		Han Zhang
		Xinyuan Long
		Dakun Lai
		</p>
	<p>The aim of this study is to propose an uncertainty-aware dynamic cascading framework based on a spiking neural network (UDC-SNN) for multimodal emotion recognition, particularly to address the inherent trade-off between recognition performance and energy efficiency. An asymmetric dynamic routing mechanism was proposed to enable demand-driven activation of the high-power electroencephalogram (EEG) branch, coupled with preliminary inference on a low-power electrocardiogram (ECG) branch and uncertainty quantification via Shannon entropy. Meanwhile, a parameter-free log-linear aggregation strategy was developed to transform modality-specific entropy into dynamic Bayesian weights through an exponential decay function, effectively mitigating the negative transfer effects induced by unimodal noise. The UDC-SNN was evaluated on the multimodal affective dataset DREAMER, comprising 23 subjects (170,660 segments). The averaged recognition accuracy and energy consumption across the three dimensions of valence, arousal, and dominance were 90.75% and 4.62 &amp;amp;mu;J, respectively. The obtained results suggest that the proposed framework could achieve a favorable balance between high emotion recognition accuracy and low energy consumption, thereby establishing its applicability for real-time monitoring in resource-constrained scenarios.</p>
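	<p>A minimal sketch of the entropy-gated cascade described above: the low-power ECG branch runs first, and the EEG branch is activated only when the ECG output is too uncertain; the gate threshold and decay constant are assumptions:</p>
	<pre><code>
import numpy as np

def entropy(p):
    p = np.clip(p, 1e-12, 1.0)
    return float(-(p * np.log(p)).sum())

def cascade_predict(ecg_probs, eeg_branch, gate_threshold=0.8, decay=1.0):
    """Entropy-gated cascade: EEG branch runs only on uncertain ECG outputs."""
    h_ecg = entropy(ecg_probs)
    if h_ecg < gate_threshold:
        return ecg_probs  # confident enough; skip the high-power branch
    eeg_probs = eeg_branch()
    # Log-linear aggregation: entropies -> weights via exponential decay.
    w = np.exp(-decay * np.array([h_ecg, entropy(eeg_probs)]))
    w /= w.sum()
    fused = w[0] * ecg_probs + w[1] * eeg_probs
    return fused / fused.sum()

ecg = np.array([0.45, 0.35, 0.20])  # uncertain ECG output triggers the gate
print(cascade_predict(ecg, lambda: np.array([0.8, 0.15, 0.05])))
</code></pre>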
	]]></content:encoded>

	<dc:title>UDC-SNN: An Uncertainty-Aware Dynamic Cascading Framework with Spiking Neural Network for Balancing Performance and Energy in Multimodal Emotion Recognition</dc:title>
			<dc:creator>Guihao Ran</dc:creator>
			<dc:creator>Shengzhe Li</dc:creator>
			<dc:creator>Zhiwen Jiang</dc:creator>
			<dc:creator>Han Zhang</dc:creator>
			<dc:creator>Xinyuan Long</dc:creator>
			<dc:creator>Dakun Lai</dc:creator>
		<dc:identifier>doi: 10.3390/s26092859</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2859</prism:startingPage>
		<prism:doi>10.3390/s26092859</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2859</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2858">

	<title>Sensors, Vol. 26, Pages 2858: C-Axis Oriented LiNbO3 Thin Film Grown by Chemical Beam Epitaxy for Surface Acoustic Wave Device Applications</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2858</link>
	<description>High-frequency surface acoustic wave (SAW) devices require piezoelectric thin films combining strong electromechanical coupling, high acoustic velocity, and compatibility with scalable fabrication. Lithium niobate (LiNbO3) is a promising material, but the growth of high-quality thin films remains challenging because of lithium volatility and process-control issues. In this work, chemical beam epitaxy (CBE) was investigated as an alternative route for the deposition of c-axis-oriented LiNbO3 thin films on C-plane sapphire at a relatively low growth temperature of 400 &amp;amp;deg;C. Structural characterization confirmed high crystalline quality, with clear (006) and (0012) XRD reflections and a rocking-curve full width at half maximum of 0.04&amp;amp;deg;. To evaluate acoustic performance, a SAW delay line and a one-port resonator were fabricated on 350 nm thick films using e-beam lithography. The devices operated in the 1&amp;amp;ndash;3 GHz range and exhibited electromechanical coupling factors of about 0.3% for the Rayleigh mode at 1.7 GHz and 3% for the Sezawa mode at 2.75 GHz. Propagation velocities ranged from 5094 to 8250 m/s, and the Rayleigh-mode resonator quality factor reached about 500. These results demonstrate the feasibility of CBE-grown LiNbO3 films for SAW device applications.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2858: C-Axis Oriented LiNbO3 Thin Film Grown by Chemical Beam Epitaxy for Surface Acoustic Wave Device Applications</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2858">doi: 10.3390/s26092858</a></p>
	<p>Authors:
		Nikolay Smagin
		Thanh Ngoc Kim Bui
		Zakariae Oumekloul
		Rahma Moalla
		William Maudez
		Estelle Wagner
		Marc Duquennoy
		Rayen Kalai Mathlouthi
		Yves Deblock
		Hatem Dahmani
		Denis Remiens
		Julien Carlier
		Giacomo Benvenuti
		</p>
	<p>High-frequency surface acoustic wave (SAW) devices require piezoelectric thin films combining strong electromechanical coupling, high acoustic velocity, and compatibility with scalable fabrication. Lithium niobate (LiNbO3) is a promising material, but the growth of high-quality thin films remains challenging because of lithium volatility and process-control issues. In this work, chemical beam epitaxy (CBE) was investigated as an alternative route for the deposition of c-axis-oriented LiNbO3 thin films on C-plane sapphire at a relatively low growth temperature of 400 &amp;amp;deg;C. Structural characterization confirmed high crystalline quality, with clear (006) and (0012) XRD reflections and a rocking-curve full width at half maximum of 0.04&amp;amp;deg;. To evaluate acoustic performance, a SAW delay line and a one-port resonator were fabricated on 350 nm thick films using e-beam lithography. The devices operated in the 1&amp;amp;ndash;3 GHz range and exhibited electromechanical coupling factors of about 0.3% for the Rayleigh mode at 1.7 GHz and 3% for the Sezawa mode at 2.75 GHz. Propagation velocities ranged from 5094 to 8250 m/s, and the Rayleigh-mode resonator quality factor reached about 500. These results demonstrate the feasibility of CBE-grown LiNbO3 films for SAW device applications.</p>
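	<p>As a hedged worked example of the quantities reported above: the phase velocity follows v = f times lambda, and a common approximation relates the coupling factor to the resonance (fr) and antiresonance (fa) frequencies as k^2 = (pi^2/4)(fa - fr)/fa; the wavelength and frequency values below are placeholders, not measurements from the paper:</p>
	<pre><code>
import math

# Phase velocity from operating frequency and IDT wavelength: v = f * lambda.
# The 3 um wavelength is a placeholder, not a value from the paper.
f_hz = 1.7e9
wavelength_m = 3.0e-6
print("v =", f_hz * wavelength_m, "m/s")

# Coupling factor from resonance/antiresonance, common approximation:
# k^2 ~ (pi^2 / 4) * (fa - fr) / fa. Frequencies below are placeholders.
fr, fa = 2.740e9, 2.7434e9
k2 = (math.pi ** 2 / 4.0) * (fa - fr) / fa
print("k^2 =", round(100 * k2, 2), "%")
</code></pre>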
	]]></content:encoded>

	<dc:title>C-Axis Oriented LiNbO3 Thin Film Grown by Chemical Beam Epitaxy for Surface Acoustic Wave Device Applications</dc:title>
			<dc:creator>Nikolay Smagin</dc:creator>
			<dc:creator>Thanh Ngoc Kim Bui</dc:creator>
			<dc:creator>Zakariae Oumekloul</dc:creator>
			<dc:creator>Rahma Moalla</dc:creator>
			<dc:creator>William Maudez</dc:creator>
			<dc:creator>Estelle Wagner</dc:creator>
			<dc:creator>Marc Duquennoy</dc:creator>
			<dc:creator>Rayen Kalai Mathlouthi</dc:creator>
			<dc:creator>Yves Deblock</dc:creator>
			<dc:creator>Hatem Dahmani</dc:creator>
			<dc:creator>Denis Remiens</dc:creator>
			<dc:creator>Julien Carlier</dc:creator>
			<dc:creator>Giacomo Benvenuti</dc:creator>
		<dc:identifier>doi: 10.3390/s26092858</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2858</prism:startingPage>
		<prism:doi>10.3390/s26092858</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2858</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2857">

	<title>Sensors, Vol. 26, Pages 2857: Unobtrusive Human Activity Recognition Using Multivariate Indoor Air Quality Sensing and Hierarchical Event Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2857</link>
	<description>Recent studies have shown that common household activities produce characteristic patterns in indoor air pollutants, enabling activity inference using environmental measurements alone. However, pollutant-based approaches are usually formulated as flat multi-class classification problems, even though indoor environments are dominated by long baseline periods with no emission-generating activity, leading to false alarms and unstable predictions. This work proposes a gated hierarchical inference framework for recognizing activities from indoor air quality data. A first-stage gate detects whether a time window contains activity-induced pollutant dynamics, while a second-stage classifier conditionally identifies the specific activity only when activity relevance is detected. Multivariate time-series measurements of particulate matter, volatile organic compounds, nitrogen oxides, carbon dioxide, temperature and relative humidity were collected using a portable monitoring system during controlled household cooking and cleaning experiments. Temporal windows were processed using recurrent neural network models in both stages. By separating activity detection from activity identification, the proposed method aligns inference with the physical generation of indoor pollutant signals and improves robustness in baseline-dominated monitoring scenarios while maintaining reliable discrimination among activities. The framework supports unobtrusive activity recognition and enables applications in exposure-aware monitoring and intelligent indoor environmental management.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2857: Unobtrusive Human Activity Recognition Using Multivariate Indoor Air Quality Sensing and Hierarchical Event Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2857">doi: 10.3390/s26092857</a></p>
	<p>Authors:
		Grigorios Protopsaltis
		Christos Mountzouris
		Gerasimos Theodorou
		John Gialelis
		</p>
	<p>Recent studies have shown that common household activities produce characteristic patterns in indoor air pollutants, enabling activity inference using environmental measurements alone. However, pollutant-based approaches are usually formulated as flat multi-class classification problems, even though indoor environments are dominated by long baseline periods with no emission-generating activity, leading to false alarms and unstable predictions. This work proposes a gated hierarchical inference framework for recognizing activities from indoor air quality data. A first-stage gate detects whether a time window contains activity-induced pollutant dynamics, while a second-stage classifier conditionally identifies the specific activity only when activity relevance is detected. Multivariate time-series measurements of particulate matter, volatile organic compounds, nitrogen oxides, carbon dioxide, temperature and relative humidity were collected using a portable monitoring system during controlled household cooking and cleaning experiments. Temporal windows were processed using recurrent neural network models in both stages. By separating activity detection from activity identification, the proposed method aligns inference with the physical generation of indoor pollutant signals and improves robustness in baseline-dominated monitoring scenarios while maintaining reliable discrimination among activities. The framework supports unobtrusive activity recognition and enables applications in exposure-aware monitoring and intelligent indoor environmental management.</p>
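	<p>A minimal sketch of the two-stage gated inference described above; the gate threshold and the toy stand-ins for the trained recurrent models are placeholders:</p>
	<pre><code>
import numpy as np

def gated_inference(window, gate, classifier, threshold=0.5):
    """Stage 1 gates on activity relevance; stage 2 runs conditionally."""
    p_active = gate(window)     # probability the window contains activity
    if p_active < threshold:
        return "baseline"       # no emission-generating activity detected
    return classifier(window)   # identify the specific activity

# Placeholder stand-ins for the trained recurrent models.
toy_gate = lambda w: float(w.mean() > 0.3)
toy_classifier = lambda w: "cooking" if w[:, 0].max() > 0.8 else "cleaning"

window = np.random.rand(60, 6)  # 60 time steps x 6 pollutant channels
print(gated_inference(window, toy_gate, toy_classifier))
</code></pre>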
	]]></content:encoded>

	<dc:title>Unobtrusive Human Activity Recognition Using Multivariate Indoor Air Quality Sensing and Hierarchical Event Detection</dc:title>
			<dc:creator>Grigorios Protopsaltis</dc:creator>
			<dc:creator>Christos Mountzouris</dc:creator>
			<dc:creator>Gerasimos Theodorou</dc:creator>
			<dc:creator>John Gialelis</dc:creator>
		<dc:identifier>doi: 10.3390/s26092857</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2857</prism:startingPage>
		<prism:doi>10.3390/s26092857</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2857</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2856">

	<title>Sensors, Vol. 26, Pages 2856: Hydraulic Seal Wear Classification by Fine-Tuning a Transformer-Based Audio Model Using Acoustic Emission</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2856</link>
	<description>Accurate classification of seal wear is essential for condition-based and predictive maintenance of hydraulic cylinders, where seal degradation can cause fluid leakage and impair normal system operation. This study investigates the adaptation of a Transformer-based audio model for classifying seal wear conditions using acoustic emission (AE) signals. Specifically, we adapt the Audio Spectrogram Transformer (AST), a convolution-free, purely attention-based model that operates directly on audio spectrograms. The Transformer architecture enables the modeling of long-range dependencies, while the model learns discriminative representations directly from AE data without relying on manually engineered features. A selective fine-tuning strategy was implemented by adding layer-freezing functionality to the AST training pipeline, enabling different freezing configurations during fine-tuning. This allowed earlier pretrained representations to be preserved while adapting the later layers to the target AE signals, thereby reducing the risk of overfitting in the small-data setting. In addition, validation-driven early stopping was implemented to further improve generalization during fine-tuning. The model was initialized with ImageNet and AudioSet pretrained weights to exploit general-purpose representations learned from large-scale datasets. The AE data were acquired under varying pressure conditions on a hydraulic test rig designed to simulate hydraulic cylinder leakage. The datasets were partitioned into fine-tuning, validation, and evaluation subsets and labeled into three wear states: unworn, semi-worn, and worn. In addition, data augmentation techniques were applied to the fine-tuning data to increase diversity and mitigate class imbalance. The adapted model achieved 97.92% classification accuracy across all wear conditions and pressure settings, demonstrating its ability to learn discriminative wear-related patterns directly from AE data. Furthermore, the framework&amp;amp;rsquo;s versatility was assessed on a bearing strip dataset acquired from the same hydraulic test rig. Using the same fine-tuning configuration, the model achieved 95.65% accuracy and 100% recall for the worn state. These findings highlight the potential of transformer-based architectures for data-efficient, end-to-end AE-based diagnostics across hydraulic system components.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2856: Hydraulic Seal Wear Classification by Fine-Tuning a Transformer-Based Audio Model Using Acoustic Emission</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2856">doi: 10.3390/s26092856</a></p>
	<p>Authors:
		Lisa Maria Svendsen
		Vignesh V. Shanbhag
		Rune Schlanbusch
		</p>
	<p>Accurate classification of seal wear is essential for condition-based and predictive maintenance of hydraulic cylinders, where seal degradation can cause fluid leakage and impair normal system operation. This study investigates the adaptation of a Transformer-based audio model for classifying seal wear conditions using acoustic emission (AE) signals. Specifically, we adapt the Audio Spectrogram Transformer (AST), a convolution-free, purely attention-based model that operates directly on audio spectrograms. The Transformer architecture enables the modeling of long-range dependencies, while the model learns discriminative representations directly from AE data without relying on manually engineered features. A selective fine-tuning strategy was implemented by adding layer-freezing functionality to the AST training pipeline, enabling different freezing configurations during fine-tuning. This allowed earlier pretrained representations to be preserved while adapting the later layers to the target AE signals, thereby reducing the risk of overfitting in the small-data setting. In addition, validation-driven early stopping was implemented to further improve generalization during fine-tuning. The model was initialized with ImageNet and AudioSet pretrained weights to exploit general-purpose representations learned from large-scale datasets. The AE data were acquired under varying pressure conditions on a hydraulic test rig designed to simulate hydraulic cylinder leakage. The datasets were partitioned into fine-tuning, validation, and evaluation subsets and labeled into three wear states: unworn, semi-worn, and worn. In addition, data augmentation techniques were applied to the fine-tuning data to increase diversity and mitigate class imbalance. The adapted model achieved 97.92% classification accuracy across all wear conditions and pressure settings, demonstrating its ability to learn discriminative wear-related patterns directly from AE data. Furthermore, the framework&amp;amp;rsquo;s versatility was assessed on a bearing strip dataset acquired from the same hydraulic test rig. Using the same fine-tuning configuration, the model achieved 95.65% accuracy and 100% recall for the worn state. These findings highlight the potential of transformer-based architectures for data-efficient, end-to-end AE-based diagnostics across hydraulic system components.</p>
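	<p>A minimal PyTorch-style sketch of selective layer freezing during fine-tuning, assuming the encoder is exposed as an ordered list of blocks; the module layout below is hypothetical, not the actual AST implementation:</p>
	<pre><code>
import torch
import torch.nn as nn

def freeze_early_blocks(model: nn.Module, blocks, n_frozen: int):
    """Freeze the first n_frozen transformer blocks; fine-tune the rest.

    Preserves general-purpose pretrained representations in early layers
    while adapting later layers to the target acoustic-emission data.
    """
    for i, block in enumerate(blocks):
        for p in block.parameters():
            p.requires_grad = i >= n_frozen
    # Only trainable parameters go to the optimizer.
    return [p for p in model.parameters() if p.requires_grad]

# Toy stand-in for a transformer encoder with 4 blocks.
blocks = nn.ModuleList(nn.Linear(8, 8) for _ in range(4))
model = nn.Sequential(*blocks)
trainable = freeze_early_blocks(model, blocks, n_frozen=2)
optimizer = torch.optim.AdamW(trainable, lr=1e-4)
print(sum(p.numel() for p in trainable), "trainable parameters")
</code></pre>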
	]]></content:encoded>

	<dc:title>Hydraulic Seal Wear Classification by Fine-Tuning a Transformer-Based Audio Model Using Acoustic Emission</dc:title>
			<dc:creator>Lisa Maria Svendsen</dc:creator>
			<dc:creator>Vignesh V. Shanbhag</dc:creator>
			<dc:creator>Rune Schlanbusch</dc:creator>
		<dc:identifier>doi: 10.3390/s26092856</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2856</prism:startingPage>
		<prism:doi>10.3390/s26092856</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2856</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2854">

	<title>Sensors, Vol. 26, Pages 2854: A Federated Approach for Adaptive Urban Sound Classification on TinyML Edge Devices</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2854</link>
	<description>Cities exhibit sound patterns that vary across locations and time, while transmitting raw audio introduces communication and privacy concerns. We present a federated TinyML architecture for real-time urban sound classification on microcontroller-class edge devices. A compact audio embedding network is deployed as a frozen feature extractor, while a lightweight classifier head is trained on-device and shared via MQTT, enabling communication-efficient collaborative learning. The system is evaluated on ESP32 (Espressif Systems, Shanghai, China) hardware under cross-dataset transfer from UrbanSound8K to SONYC. Domain shift reduces baseline accuracy from 90.39% to 78.27%, while local adaptation and federated aggregation improve accuracy to approximately 85%, recovering most of the performance loss. Repeated aggregation further improves macro-F1 and class balance across heterogeneous data. Embedded measurements confirm real-time inference (~250 ms per window) with negligible overhead, while each update exchanges only a compact classifier head (~1.2 kB). These results demonstrate that adaptive classification can be achieved on resource-constrained nodes in distributed smart-city networks.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2854: A Federated Approach for Adaptive Urban Sound Classification on TinyML Edge Devices</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2854">doi: 10.3390/s26092854</a></p>
	<p>Authors:
		Athanasios Trigkas
		Dimitrios Piromalis
		Panagiotis Papageorgas
		</p>
	<p>Cities exhibit sound patterns that vary across locations and time, while transmitting raw audio introduces communication and privacy concerns. We present a federated TinyML architecture for real-time urban sound classification on microcontroller-class edge devices. A compact audio embedding network is deployed as a frozen feature extractor, while a lightweight classifier head is trained on-device and shared via MQTT, enabling communication-efficient collaborative learning. The system is evaluated on ESP32 (Espressif Systems, Shanghai, China) hardware under cross-dataset transfer from UrbanSound8K to SONYC. Domain shift reduces baseline accuracy from 90.39% to 78.27%, while local adaptation and federated aggregation improve accuracy to approximately 85%, recovering most of the performance loss. Repeated aggregation further improves macro-F1 and class balance across heterogeneous data. Embedded measurements confirm real-time inference (~250 ms per window) with negligible overhead, while each update exchanges only a compact classifier head (~1.2 kB). These results demonstrate that adaptive classification can be achieved on resource-constrained nodes in distributed smart-city networks.</p>
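	<p>A minimal sketch of aggregating the shared classifier head, assuming plain sample-weighted federated averaging of the exchanged head weights; the abstract states only that the head is shared via MQTT, so the aggregation rule is an assumption:</p>
	<pre><code>
import numpy as np

def fedavg_heads(heads, sizes):
    """Weighted average of classifier-head parameter vectors.

    heads: list of 1-D weight arrays (~1.2 kB each per the abstract),
    sizes: number of local samples per node, used as weights.
    """
    sizes = np.asarray(sizes, dtype=float)
    stacked = np.stack(heads)
    return (stacked * (sizes / sizes.sum())[:, None]).sum(axis=0)

# Three edge nodes with 300-float heads and different local data volumes.
heads = [np.random.rand(300) for _ in range(3)]
print(fedavg_heads(heads, sizes=[120, 80, 200])[:5])
</code></pre>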
	]]></content:encoded>

	<dc:title>A Federated Approach for Adaptive Urban Sound Classification on TinyML Edge Devices</dc:title>
			<dc:creator>Athanasios Trigkas</dc:creator>
			<dc:creator>Dimitrios Piromalis</dc:creator>
			<dc:creator>Panagiotis Papageorgas</dc:creator>
		<dc:identifier>doi: 10.3390/s26092854</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2854</prism:startingPage>
		<prism:doi>10.3390/s26092854</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2854</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2853">

	<title>Sensors, Vol. 26, Pages 2853: Towards Secure Embodied Communication Management in AI Era: Reputation-Guided Agent Message Exchange</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2853</link>
	<description>For large-scale embedded sensor-actuator networks, such as robotic swarms deployed over vast areas and other embedded intelligent devices, end-to-end message exchange is often impossible due to their limited communication range, power constraints, and device mobility. Devices thus rely on multi-hop relaying, exposing them to Man-in-the-Middle (MitM) attacks in which compromised relays tamper with, forge, or inject false messages. Existing countermeasures, including end-to-end encryption and Byzantine consensus, involve high overhead and require global coordination, which renders them impractical for time-sensitive message exchange in embedded intelligence. Security management for communication among embodied devices is therefore highly desirable. To address this challenge, we propose Reputation-Guided Dynamic Relay Selection (RDRS), a lightweight, distributed countermeasure against MitM attacks that leverages interactive feedback to evaluate the reputation of embedded devices. Specifically, each device maintains reputation scores updated via recent interaction success rates with decay factors to counter dynamic adversaries. When exchanging messages, embedded devices select next-hop neighbors weighted by reputation scores, effectively bypassing malicious devices without explicit detection or in-path verification. Comprehensive simulations in embedded sensor-actuator networks demonstrate that RDRS reduces the tampering success rate (TSR) by 80&amp;amp;ndash;95% compared to the baselines, maintains a request satisfaction rate (RSR) above 79% even with 40% malicious nodes, and achieves 64% lower delay with comparable overhead.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2853: Towards Secure Embodied Communication Management in AI Era: Reputation-Guided Agent Message Exchange</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2853">doi: 10.3390/s26092853</a></p>
	<p>Authors:
		Jiangtao Mu
		Li Wan
		Zehui Dong
		Yong Wei
		Zhiwei Xu
		</p>
	<p>For large-scale embedded sensor-actuator networks, such as robotic swarms deployed over vast areas and other embedded intelligent devices, end-to-end message exchange is often impossible due to their limited communication range, power constraints, and device mobility. Devices thus rely on multi-hop relaying, exposing them to Man-in-the-Middle (MitM) attacks in which compromised relays tamper with, forge, or inject false messages. Existing countermeasures, including end-to-end encryption and Byzantine consensus, involve high overhead and require global coordination, which renders them impractical for time-sensitive message exchange in embedded intelligence. Security management for communication among embodied devices is therefore highly desirable. To address this challenge, we propose Reputation-Guided Dynamic Relay Selection (RDRS), a lightweight, distributed countermeasure against MitM attacks that leverages interactive feedback to evaluate the reputation of embedded devices. Specifically, each device maintains reputation scores updated via recent interaction success rates with decay factors to counter dynamic adversaries. When exchanging messages, embedded devices select next-hop neighbors weighted by reputation scores, effectively bypassing malicious devices without explicit detection or in-path verification. Comprehensive simulations in embedded sensor-actuator networks demonstrate that RDRS reduces the tampering success rate (TSR) by 80&amp;amp;ndash;95% compared to the baselines, maintains a request satisfaction rate (RSR) above 79% even with 40% malicious nodes, and achieves 64% lower delay with comparable overhead.</p>
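	<p>A minimal illustrative sketch of the reputation mechanism described above (the exponential-decay update rule, the 0.5 prior, and the proportional weighting are assumptions for illustration, not the exact RDRS rules):</p>
	<pre><code># Sketch: decayed reputation scores and reputation-weighted next-hop choice.
import random

DECAY = 0.9  # assumed decay factor discounting stale interactions

def update_reputation(rep, neighbor, success):
    """Blend the latest interaction outcome into the stored score."""
    prior = rep.get(neighbor, 0.5)  # assumed neutral prior for new neighbors
    rep[neighbor] = DECAY * prior + (1 - DECAY) * (1.0 if success else 0.0)

def pick_next_hop(rep, neighbors):
    """Sample a relay with probability proportional to its reputation."""
    weights = [max(rep.get(n, 0.5), 1e-6) for n in neighbors]
    return random.choices(neighbors, weights=weights, k=1)[0]

rep = {}
update_reputation(rep, "nodeB", success=True)   # nodeB relayed correctly
update_reputation(rep, "nodeC", success=False)  # nodeC tampered or dropped
print(pick_next_hop(rep, ["nodeB", "nodeC"]))   # nodeB is favored
</code></pre>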
	]]></content:encoded>

	<dc:title>Towards Secure Embodied Communication Management in AI Era: Reputation-Guided Agent Message Exchange</dc:title>
			<dc:creator>Jiangtao Mu</dc:creator>
			<dc:creator>Li Wan</dc:creator>
			<dc:creator>Zehui Dong</dc:creator>
			<dc:creator>Yong Wei</dc:creator>
			<dc:creator>Zhiwei Xu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092853</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2853</prism:startingPage>
		<prism:doi>10.3390/s26092853</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2853</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2855">

	<title>Sensors, Vol. 26, Pages 2855: LPFG Biosensor for IL-6 Detection in Murine Serum Samples Associated with Ischemic Disease</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2855</link>
	<description>Nowadays, optical fiber-based biosensors are widely used in various fields, particularly in medical diagnostics and the selection of appropriate treatments for certain diseases. One example is cerebral ischemic disease, where many biomarkers are released and provide valuable information about the severity and progression of the disease. In this study, a long-period fiber grating (LPFG) biosensor was developed using a standard single-mode optical fiber and a monoclonal antibody (IL-6 mAb) as the biological recognition element to detect IL-6, a protein associated with the inflammatory process. The assembly of the LPFG biosensor was characterized through optical and electron microscopy to observe morphological changes at different stages of the fabrication and detection processes. Additionally, micro-infrared spectroscopy was employed to identify functional groups in the protein region linked to the presence of IL-6. Experimental data were analyzed using principal component analysis, confirming the biosensor&amp;amp;rsquo;s ability to detect IL-6 and providing insights into its fabrication process.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2855: LPFG Biosensor for IL-6 Detection in Murine Serum Samples Associated with Ischemic Disease</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2855">doi: 10.3390/s26092855</a></p>
	<p>Authors:
		Brenda Vertti-Cervantes
		Karina González-León
		Marcos García-Juárez
		Georgina Beltrán-Pérez
		Omar Montes-Narváez
		Valentín López-Gayou
		Oscar González-Flores
		Hugo Martínez-Gutiérrez
		Raúl Jacobo Delgado Macuil
		</p>
	<p>Nowadays, optical fiber-based biosensors are widely used in various fields, particularly in medical diagnostics and the selection of appropriate treatments for certain diseases. One example is cerebral ischemic disease, where many biomarkers are released and provide valuable information about the severity and progression of the disease. In this study, a long-period fiber grating (LPFG) biosensor was developed using a standard single-mode optical fiber and a monoclonal antibody (IL-6 mAb) as the biological recognition element to detect IL-6, a protein associated with the inflammatory process. The assembly of the LPFG biosensor was characterized through optical and electron microscopy to observe morphological changes at different stages of the fabrication and detection processes. Additionally, micro-infrared spectroscopy was employed to identify functional groups in the protein region linked to the presence of IL-6. Experimental data were analyzed using principal component analysis, confirming the biosensor&amp;amp;rsquo;s ability to detect IL-6 and providing insights into its fabrication process.</p>
	]]></content:encoded>

	<dc:title>LPFG Biosensor for IL-6 Detection in Murine Serum Samples Associated with Ischemic Disease</dc:title>
			<dc:creator>Brenda Vertti-Cervantes</dc:creator>
			<dc:creator>Karina González-León</dc:creator>
			<dc:creator>Marcos García-Juárez</dc:creator>
			<dc:creator>Georgina Beltrán-Pérez</dc:creator>
			<dc:creator>Omar Montes-Narváez</dc:creator>
			<dc:creator>Valentín López-Gayou</dc:creator>
			<dc:creator>Oscar González-Flores</dc:creator>
			<dc:creator>Hugo Martínez-Gutiérrez</dc:creator>
			<dc:creator>Raúl Jacobo Delgado Macuil</dc:creator>
		<dc:identifier>doi: 10.3390/s26092855</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2855</prism:startingPage>
		<prism:doi>10.3390/s26092855</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2855</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2852">

	<title>Sensors, Vol. 26, Pages 2852: Reinforcement Learning-Driven Autonomous Path Planning for Unmanned Surface Vehicles: Current Status, Challenges, and Future Prospects</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2852</link>
	<description>The continuous advancement of autonomy and intelligence in marine shipping has made the safe and efficient navigation of unmanned surface vehicles in complex waters a major research focus. As a key component of the autonomous decision-making system for unmanned surface vehicles (USVs), local path planning must achieve real-time collision avoidance and motion optimization under dynamic obstacles, multiple rule constraints, and strong environmental uncertainty. In recent years, reinforcement learning has become an important technical route for local path planning of USVs owing to its autonomous decision-making ability in high-dimensional continuous state spaces and its adaptability to complex nonlinear problems. This paper systematically reviews the relevant literature along the evolution of algorithmic paradigms and their functional roles in different water scenarios; focuses on deep Q-network (DQN), Proximal Policy Optimization (PPO), Soft Actor-Critic (SAC), and Twin Delayed Deep Deterministic Policy Gradient (TD3), along with collaborative architectures that integrate traditional planning methods such as A* and Rapidly-exploring Random Tree (RRT); and summarizes the performance characteristics, advantages, and limitations of these methods in typical scenarios. The review shows that the main bottlenecks of current research include insufficient reward mechanism design, low sample utilization efficiency, difficulty in transferring from simulation to real ships, and insufficient safety and trustworthiness verification. Finally, future development trends are discussed in the two directions of data fusion and security enhancement in order to provide a reference for related research.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2852: Reinforcement Learning-Driven Autonomous Path Planning for Unmanned Surface Vehicles: Current Status, Challenges, and Future Prospects</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2852">doi: 10.3390/s26092852</a></p>
	<p>Authors:
		Zexu Dong
		Jiashu Zheng
		Chenxuan Guo
		Fangming Zhao
		Yijie Chu
		Xiaojun Chen
		</p>
	<p>The continuous advancement of autonomy and intelligence in marine shipping has made the safe and efficient navigation of unmanned surface vehicles in complex waters a major research focus. As a key component of the autonomous decision-making system for unmanned surface vehicles (USVs), local path planning must achieve real-time collision avoidance and motion optimization under dynamic obstacles, multiple rule constraints, and strong environmental uncertainty. In recent years, reinforcement learning has become an important technical route for local path planning of USVs owing to its autonomous decision-making ability in high-dimensional continuous state spaces and its adaptability to complex nonlinear problems. This paper systematically reviews the relevant literature along the evolution of algorithmic paradigms and their functional roles in different water scenarios; focuses on deep Q-network (DQN), Proximal Policy Optimization (PPO), Soft Actor-Critic (SAC), and Twin Delayed Deep Deterministic Policy Gradient (TD3), along with collaborative architectures that integrate traditional planning methods such as A* and Rapidly-exploring Random Tree (RRT); and summarizes the performance characteristics, advantages, and limitations of these methods in typical scenarios. The review shows that the main bottlenecks of current research include insufficient reward mechanism design, low sample utilization efficiency, difficulty in transferring from simulation to real ships, and insufficient safety and trustworthiness verification. Finally, future development trends are discussed in the two directions of data fusion and security enhancement in order to provide a reference for related research.</p>
	]]></content:encoded>

	<dc:title>Reinforcement Learning-Driven Autonomous Path Planning for Unmanned Surface Vehicles: Current Status, Challenges, and Future Prospects</dc:title>
			<dc:creator>Zexu Dong</dc:creator>
			<dc:creator>Jiashu Zheng</dc:creator>
			<dc:creator>Chenxuan Guo</dc:creator>
			<dc:creator>Fangming Zhao</dc:creator>
			<dc:creator>Yijie Chu</dc:creator>
			<dc:creator>Xiaojun Chen</dc:creator>
		<dc:identifier>doi: 10.3390/s26092852</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2852</prism:startingPage>
		<prism:doi>10.3390/s26092852</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2852</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2846">

	<title>Sensors, Vol. 26, Pages 2846: RTS-SLAM: A Trajectory Consistency-Driven Multi-Constraint Dynamic Feature-Rejection Method for Visual SLAM in Dynamic Environments</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2846</link>
	<description>Simultaneous Localization and Mapping (SLAM) is a fundamental methodology that underpins autonomous navigation in robotic systems. Conventional approaches perform well in static environments but rely on the assumption of environmental rigidity, which leads to significant accuracy degradation in dynamic environments. To address this challenge, this study presents RTS-SLAM, a real-time semantic visual SLAM system designed for dynamic environments. Based on the ORB-SLAM2 framework, a multi-layer, constraint-driven dynamic feature-rejection strategy is introduced. The proposed approach first removes dynamic features by combining semantic information with geometric constraints. Subsequently, residual dynamic points are eliminated via trajectory-consistency constraint analysis, thereby effectively improving localization accuracy. Furthermore, a dense mapping strategy featuring global sparsification and critical region refinement is proposed. By reducing redundancy in the dense point cloud, the method decreases memory usage while preserving important object geometries. Experimental evaluations on the TUM RGB-D and Bonn datasets indicate that RTS-SLAM reduces the average absolute trajectory error by more than 95% compared with ORB-SLAM2 in dynamic environments. Meanwhile, the system maintains real-time performance and achieves high localization accuracy in dynamic environments.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2846: RTS-SLAM: A Trajectory Consistency-Driven Multi-Constraint Dynamic Feature-Rejection Method for Visual SLAM in Dynamic Environments</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2846">doi: 10.3390/s26092846</a></p>
	<p>Authors:
		Huailiang Wang
		Qiming Hu
		Beicheng Li
		Yuhao Geng
		Chao Su
		Shibo Zhu
		Enhui Zheng
		Weimin Chen
		</p>
	<p>Simultaneous Localization and Mapping (SLAM) is a fundamental methodology that underpins autonomous navigation in robotic systems. Conventional approaches perform well in static environments but rely on the assumption of environmental rigidity, which leads to significant accuracy degradation in dynamic environments. To address this challenge, this study presents RTS-SLAM, a real-time semantic visual SLAM system designed for dynamic environments. Based on the ORB-SLAM2 framework, a multi-layer, constraint-driven dynamic feature-rejection strategy is introduced. The proposed approach first removes dynamic features by combining semantic information with geometric constraints. Subsequently, residual dynamic points are eliminated via trajectory-consistency constraint analysis, thereby effectively improving localization accuracy. Furthermore, a dense mapping strategy featuring global sparsification and critical region refinement is proposed. By reducing redundancy in the dense point cloud, the method decreases memory usage while preserving important object geometries. Experimental evaluations on the TUM RGB-D and Bonn datasets indicate that RTS-SLAM reduces the average absolute trajectory error by more than 95% compared with ORB-SLAM2 in dynamic environments. Meanwhile, the system maintains real-time performance and achieves high localization accuracy in dynamic environments.</p>
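	<p>A minimal illustrative sketch of one common geometric constraint of the kind described above: features whose epipolar distance under a fundamental matrix exceeds a threshold are flagged as dynamic. The toy matrix, threshold, and points are illustrative; RTS-SLAM combines several constraint layers beyond this.</p>
	<pre><code># Sketch: epipolar-distance test for dynamic-feature rejection.
import numpy as np

def epipolar_distances(F, pts1, pts2):
    """Distance of pts2 to the epipolar lines F @ pts1 (pixels)."""
    ones = np.ones((pts1.shape[0], 1))
    p1 = np.hstack([pts1, ones])      # homogeneous coordinates, (N, 3)
    p2 = np.hstack([pts2, ones])
    lines = p1 @ F.T                  # epipolar lines in the second image
    num = np.abs(np.sum(lines * p2, axis=1))
    den = np.hypot(lines[:, 0], lines[:, 1])
    return num / den

def keep_static(F, pts1, pts2, thresh=1.0):
    """True where a match is consistent with the static-scene geometry."""
    return epipolar_distances(F, pts1, pts2) < thresh

F = np.array([[0, -1e-3, 0.1], [1e-3, 0, -0.2], [-0.1, 0.2, 0]])  # toy F
print(keep_static(F, np.array([[100.0, 120.0]]), np.array([[101.0, 119.0]])))
</code></pre>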
	]]></content:encoded>

	<dc:title>RTS-SLAM: A Trajectory Consistency-Driven Multi-Constraint Dynamic Feature-Rejection Method for Visual SLAM in Dynamic Environments</dc:title>
			<dc:creator>Huailiang Wang</dc:creator>
			<dc:creator>Qiming Hu</dc:creator>
			<dc:creator>Beicheng Li</dc:creator>
			<dc:creator>Yuhao Geng</dc:creator>
			<dc:creator>Chao Su</dc:creator>
			<dc:creator>Shibo Zhu</dc:creator>
			<dc:creator>Enhui Zheng</dc:creator>
			<dc:creator>Weimin Chen</dc:creator>
		<dc:identifier>doi: 10.3390/s26092846</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2846</prism:startingPage>
		<prism:doi>10.3390/s26092846</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2846</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2849">

	<title>Sensors, Vol. 26, Pages 2849: Chasing Ghosts: A Simulation-to-Real Olfactory Navigation Stack with Optional Vision Augmentation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2849</link>
	<description>Autonomous odor source localization remains a challenging problem for aerial robots due to turbulent airflow, sparse and delayed sensory signals, and strict payload and computation constraints. While prior unmanned aerial vehicle (UAV)-based olfaction systems have demonstrated gas distribution mapping or reactive plume tracing, they rely on predefined coverage patterns, external infrastructure, or extensive sensing and coordination. In this work, we present a complete, open-source UAV system for online odor source localization using a minimal sensor suite. The system integrates custom olfaction hardware, onboard sensing, and a learning-based navigation policy that we train in simulation and deploy on a real quadrotor. Through our minimal framework, the UAV is able to navigate directly toward an odor source without constructing an explicit gas distribution map or relying on external positioning systems. We incorporate vision as an optional complementary modality to accelerate navigation under certain conditions. We validate the proposed system through real-world flight experiments in a large indoor environment using an ethanol source, demonstrating consistent source-finding behavior under realistic airflow conditions. The primary contribution of this work is a reproducible system and methodological framework for UAV-based olfactory navigation and source finding under minimal sensing assumptions. We elaborate on our hardware design and open-source our UAV firmware, simulation code, olfaction&amp;amp;ndash;vision dataset, and circuit board to the community.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2849: Chasing Ghosts: A Simulation-to-Real Olfactory Navigation Stack with Optional Vision Augmentation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2849">doi: 10.3390/s26092849</a></p>
	<p>Authors:
		Kordel K. France
		Ovidiu Daescu
		Latifur Khan
		Rohith Peddi
		</p>
	<p>Autonomous odor source localization remains a challenging problem for aerial robots due to turbulent airflow, sparse and delayed sensory signals, and strict payload and computation constraints. While prior unmanned aerial vehicle (UAV)-based olfaction systems have demonstrated gas distribution mapping or reactive plume tracing, they rely on predefined coverage patterns, external infrastructure, or extensive sensing and coordination. In this work, we present a complete, open-source UAV system for online odor source localization using a minimal sensor suite. The system integrates custom olfaction hardware, onboard sensing, and a learning-based navigation policy that we train in simulation and deploy on a real quadrotor. Through our minimal framework, the UAV is able to navigate directly toward an odor source without constructing an explicit gas distribution map or relying on external positioning systems. We incorporate vision as an optional complementary modality to accelerate navigation under certain conditions. We validate the proposed system through real-world flight experiments in a large indoor environment using an ethanol source, demonstrating consistent source-finding behavior under realistic airflow conditions. The primary contribution of this work is a reproducible system and methodological framework for UAV-based olfactory navigation and source finding under minimal sensing assumptions. We elaborate on our hardware design and open-source our UAV firmware, simulation code, olfaction&amp;amp;ndash;vision dataset, and circuit board to the community.</p>
	]]></content:encoded>

	<dc:title>Chasing Ghosts: A Simulation-to-Real Olfactory Navigation Stack with Optional Vision Augmentation</dc:title>
			<dc:creator>Kordel K. France</dc:creator>
			<dc:creator>Ovidiu Daescu</dc:creator>
			<dc:creator>Latifur Khan</dc:creator>
			<dc:creator>Rohith Peddi</dc:creator>
		<dc:identifier>doi: 10.3390/s26092849</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2849</prism:startingPage>
		<prism:doi>10.3390/s26092849</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2849</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2850">

	<title>Sensors, Vol. 26, Pages 2850: Joint Transmit&amp;ndash;Receive Weight Optimization for FDA Radar to Balance Active Detection and RF Stealth</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2850</link>
	<description>Existing studies on frequency diverse array (FDA) radar sensing systems have primarily focused on radio-frequency (RF) stealth characteristics with limited attention to the balance between RF stealth and active detection performance. To address this issue, this paper proposes a joint transmit&amp;amp;ndash;receive weight optimization scheme for FDA radar systems to achieve an effective balance between active detection and RF stealth. The resulting optimization problem is non-convex, and a block coordinate descent (BCD)-based alternating optimization method with a carefully designed initialization strategy is developed to solve it efficiently. Simulation results demonstrate that the proposed method achieves improved RF stealth performance while maintaining comparable active detection capability, compared with conventional FDA radar and representative existing optimization-based benchmark methods. These results demonstrate the effectiveness of the proposed method for balancing active detection and RF stealth performance in FDA radar sensing systems.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2850: Joint Transmit&amp;ndash;Receive Weight Optimization for FDA Radar to Balance Active Detection and RF Stealth</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2850">doi: 10.3390/s26092850</a></p>
	<p>Authors:
		Haoliang Guan
		Shunsheng Zhang
		Wen-Qin Wang
		</p>
	<p>Existing studies on frequency diverse array (FDA) radar sensing systems have primarily focused on radio-frequency (RF) stealth characteristics with limited attention to the balance between RF stealth and active detection performance. To address this issue, this paper proposes a joint transmit&amp;amp;ndash;receive weight optimization scheme for FDA radar systems to achieve an effective balance between active detection and RF stealth. The resulting optimization problem is non-convex, and a block coordinate descent (BCD)-based alternating optimization method with a carefully designed initialization strategy is developed to solve it efficiently. Simulation results demonstrate that the proposed method achieves improved RF stealth performance while maintaining comparable active detection capability, compared with conventional FDA radar and representative existing optimization-based benchmark methods. These results demonstrate the effectiveness of the proposed method for balancing active detection and RF stealth performance in FDA radar sensing systems.</p>
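	<p>A minimal illustrative sketch of the block coordinate descent (BCD) pattern described above, alternating exact minimization over the two weight blocks; the toy quadratic objective merely stands in for the paper's non-convex detection/stealth trade-off, and the initialization is arbitrary.</p>
	<pre><code># Sketch: alternating (BCD) optimization over transmit and receive blocks.
import numpy as np

A = np.array([[0.8, 0.2], [0.2, 0.6]])  # toy coupling between the blocks

def objective(w_tx, w_rx):
    return w_tx @ A @ w_rx + w_tx @ w_tx + w_rx @ w_rx

w_tx = np.array([1.0, -1.0])  # stand-in for a designed initialization
w_rx = np.array([0.5, 0.5])
for _ in range(50):
    w_rx = -0.5 * (A.T @ w_tx)  # exact minimizer over the receive block
    w_tx = -0.5 * (A @ w_rx)    # exact minimizer over the transmit block
print(round(objective(w_tx, w_rx), 6))  # decreases toward the minimum at 0
</code></pre>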
	]]></content:encoded>

	<dc:title>Joint Transmit&amp;ndash;Receive Weight Optimization for FDA Radar to Balance Active Detection and RF Stealth</dc:title>
			<dc:creator>Haoliang Guan</dc:creator>
			<dc:creator>Shunsheng Zhang</dc:creator>
			<dc:creator>Wen-Qin Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092850</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>2850</prism:startingPage>
		<prism:doi>10.3390/s26092850</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2850</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2851">

	<title>Sensors, Vol. 26, Pages 2851: Selective Fluorescence Detection of Glyphosate Pesticide Residue Based on Fe3+ Modulated SiQDs Nanosensors</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2851</link>
	<description>In this paper, SiQDs were synthesized from 3-aminopropyltrimethoxysilane, an organosilicon source, via a room-temperature stirring method under atmospheric pressure. Based on the &amp;amp;ldquo;Turn-off&amp;amp;rdquo; and &amp;amp;ldquo;Turn-on&amp;amp;rdquo; fluorescence response mechanisms, the SiQDs/Fe3+ fluorescent probe was constructed to quantitatively detect glyphosate, exploiting the interaction between Fe3+ and glyphosate. Subsequently, the impacts of pH, incubation temperature, and reaction time on the detection of glyphosate were systematically investigated. Under the optimized detection parameters, the fluorescent probe exhibited a linear range of 2&amp;amp;ndash;10 &amp;amp;mu;g/mL and a detection limit of 394.74 ng/mL. The constructed fluorescent probe demonstrated outstanding anti-interference performance. It was applied to actual samples of potato and yam, yielding satisfactory detection results with recovery values between 91.69% and 104.53%. These findings provide novel ideas and theoretical support for glyphosate residue detection.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2851: Selective Fluorescence Detection of Glyphosate Pesticide Residue Based on Fe3+ Modulated SiQDs Nanosensors</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2851">doi: 10.3390/s26092851</a></p>
	<p>Authors:
		Ruonan Li
		Jian Xu
		Fankui Zeng
		</p>
	<p>In this paper, SiQDs were synthesized from 3-aminopropyltrimethoxysilane, an organosilicon source, via a room-temperature stirring method under atmospheric pressure. Based on the &amp;amp;ldquo;Turn-off&amp;amp;rdquo; and &amp;amp;ldquo;Turn-on&amp;amp;rdquo; fluorescence response mechanisms, the SiQDs/Fe3+ fluorescent probe was constructed to quantitatively detect glyphosate, exploiting the interaction between Fe3+ and glyphosate. Subsequently, the impacts of pH, incubation temperature, and reaction time on the detection of glyphosate were systematically investigated. Under the optimized detection parameters, the fluorescent probe exhibited a linear range of 2&amp;amp;ndash;10 &amp;amp;mu;g/mL and a detection limit of 394.74 ng/mL. The constructed fluorescent probe demonstrated outstanding anti-interference performance. It was applied to actual samples of potato and yam, yielding satisfactory detection results with recovery values between 91.69% and 104.53%. These findings provide novel ideas and theoretical support for glyphosate residue detection.</p>
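	<p>For reference, the calibration quantities reported above follow from the standard relations LOD = 3&sigma;_blank/slope and recovery = measured/spiked &times; 100%; the sketch below uses made-up numbers of the same magnitude, not the paper's data.</p>
	<pre><code># Sketch with illustrative numbers: linear fit, LOD, and spike recovery.
import numpy as np

conc = np.array([2.0, 4.0, 6.0, 8.0, 10.0])        # standards, ug/mL
signal = np.array([0.21, 0.39, 0.62, 0.80, 1.01])  # toy fluorescence response
slope, intercept = np.polyfit(conc, signal, 1)

sd_blank = 0.013                        # assumed blank standard deviation
lod_ng_per_ml = 3 * sd_blank / slope * 1000
recovery_pct = 100 * 4.1 / 4.0          # toy measured vs. spiked amount
print(round(lod_ng_per_ml, 1), round(recovery_pct, 2))  # ~388 ng/mL, 102.5 %
</code></pre>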
	]]></content:encoded>

	<dc:title>Selective Fluorescence Detection of Glyphosate Pesticide Residue Based on Fe3+ Modulated SiQDs Nanosensors</dc:title>
			<dc:creator>Ruonan Li</dc:creator>
			<dc:creator>Jian Xu</dc:creator>
			<dc:creator>Fankui Zeng</dc:creator>
		<dc:identifier>doi: 10.3390/s26092851</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2851</prism:startingPage>
		<prism:doi>10.3390/s26092851</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2851</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2848">

	<title>Sensors, Vol. 26, Pages 2848: Lightweight Detection and Adaptive Path Planning for Selective Hotan Rose Harvesting</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2848</link>
	<description>Selective harvesting of Hotan roses requires distinguishing between buds and blooms for different industrial uses. However, balancing detection accuracy and computational efficiency for edge deployment remains a challenge. This study proposes an integrated framework combining a lightweight detection model, Rose_YOLO, with an adaptive path-planning algorithm, the ROSE algorithm, to address these issues. The Rose_YOLO model optimizes the YOLOv8n architecture by incorporating the C2f-Faster-CGLU module and a Rose_Head detection head to enhance feature extraction while reducing redundancy. The ROSE algorithm integrates an improved genetic algorithm (GA) with a reciprocating search mechanism to dynamically optimize picking sequences based on scene complexity. Experimental results demonstrate that Rose_YOLO achieves a precision of 90.4% and a mAP@0.5 of 96.6% for blooms and a precision of 88.4% with a mAP@0.5 of 91.7% for buds. Compared to the baseline YOLOv8n, the model reduces parameters by 47.46% to 1.579 million, compresses the size to 3.19 MB, and lowers computational complexity to 4.6 GFLOPs. For path planning, the ROSE algorithm generates optimal paths with an average length of 2796.94 pixels, which is 73.1% shorter than the reciprocating algorithm and 51.6% shorter than the standard GA. Furthermore, it achieves an average runtime of only 7.33 ms, significantly outperforming traditional methods with respect to computational speed. In conclusion, the proposed framework achieves a superior balance between lightweight design and detection performance. The successful deployment on edge devices validates its effectiveness in providing real-time visual guidance and efficient path planning, offering a robust technical solution for the automated selective harvesting of roses in complex field environments.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2848: Lightweight Detection and Adaptive Path Planning for Selective Hotan Rose Harvesting</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2848">doi: 10.3390/s26092848</a></p>
	<p>Authors:
		Jijing Lin
		Yuhang Yang
		Baojian Ma
		Zhenghao Wu
		Bangbang Chen
		</p>
	<p>Selective harvesting of Hotan roses requires distinguishing between buds and blooms for different industrial uses. However, balancing detection accuracy and computational efficiency for edge deployment remains a challenge. This study proposes an integrated framework combining a lightweight detection model, Rose_YOLO, with an adaptive path-planning algorithm, the ROSE algorithm, to address these issues. The Rose_YOLO model optimizes the YOLOv8n architecture by incorporating the C2f-Faster-CGLU module and a Rose_Head detection head to enhance feature extraction while reducing redundancy. The ROSE algorithm integrates an improved genetic algorithm (GA) with a reciprocating search mechanism to dynamically optimize picking sequences based on scene complexity. Experimental results demonstrate that Rose_YOLO achieves a precision of 90.4% and a mAP@0.5 of 96.6% for blooms and a precision of 88.4% with a mAP@0.5 of 91.7% for buds. Compared to the baseline YOLOv8n, the model reduces parameters by 47.46% to 1.579 million, compresses the size to 3.19 MB, and lowers computational complexity to 4.6 GFLOPs. For path planning, the ROSE algorithm generates optimal paths with an average length of 2796.94 pixels, which is 73.1% shorter than the reciprocating algorithm and 51.6% shorter than the standard GA. Furthermore, it achieves an average runtime of only 7.33 ms, significantly outperforming traditional methods with respect to computational speed. In conclusion, the proposed framework achieves a superior balance between lightweight design and detection performance. The successful deployment on edge devices validates its effectiveness in providing real-time visual guidance and efficient path planning, offering a robust technical solution for the automated selective harvesting of roses in complex field environments.</p>
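	<p>A minimal illustrative sketch of optimizing a picking sequence with a plain genetic algorithm (order crossover plus swap mutation); the ROSE algorithm's improved GA and reciprocating search mechanism are not reproduced here, and every parameter below is illustrative.</p>
	<pre><code># Sketch: GA over picking orders for a set of detected flower positions.
import math
import random

def path_length(order, pts):
    return sum(math.dist(pts[a], pts[b]) for a, b in zip(order, order[1:]))

def crossover(p1, p2):
    """Order crossover (OX): keep a slice of p1, fill the rest from p2."""
    a, b = sorted(random.sample(range(len(p1)), 2))
    middle = p1[a:b]
    rest = [g for g in p2 if g not in middle]
    return rest[:a] + middle + rest[a:]

def mutate(order, rate=0.1):
    if random.random() < rate:
        i, j = random.sample(range(len(order)), 2)
        order[i], order[j] = order[j], order[i]
    return order

def ga_pick_sequence(pts, pop=60, gens=200):
    population = [random.sample(range(len(pts)), len(pts)) for _ in range(pop)]
    for _ in range(gens):
        population.sort(key=lambda o: path_length(o, pts))
        elite = population[: pop // 4]  # keep the shortest quarter
        children = [mutate(crossover(*random.sample(elite, 2)))
                    for _ in range(pop - len(elite))]
        population = elite + children
    return min(population, key=lambda o: path_length(o, pts))

targets = [(random.uniform(0, 640), random.uniform(0, 480)) for _ in range(12)]
best = ga_pick_sequence(targets)
print(best, round(path_length(best, targets), 1))
</code></pre>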
	]]></content:encoded>

	<dc:title>Lightweight Detection and Adaptive Path Planning for Selective Hotan Rose Harvesting</dc:title>
			<dc:creator>Jijing Lin</dc:creator>
			<dc:creator>Yuhang Yang</dc:creator>
			<dc:creator>Baojian Ma</dc:creator>
			<dc:creator>Zhenghao Wu</dc:creator>
			<dc:creator>Bangbang Chen</dc:creator>
		<dc:identifier>doi: 10.3390/s26092848</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2848</prism:startingPage>
		<prism:doi>10.3390/s26092848</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2848</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2847">

	<title>Sensors, Vol. 26, Pages 2847: A Comparative Study of Signal Representations Methods and Deep Learning Architectures for PPG-Based Cuffless Blood Pressure Estimation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2847</link>
	<description>Hypertension is a major risk factor for cardiovascular disease and requires effective long-term monitoring. Photoplethysmography (PPG), acquired from wearable optical sensors, offers a convenient and non-invasive signal source for cuffless blood pressure (BP) estimation, but existing studies have mainly emphasized model architecture optimization, with limited systematic investigation of signal representation. This study systematically compares seven one-dimensional-to-two-dimensional signal transformation methods and evaluates multiple architectural variants for PPG-based cuffless BP estimation under a unified framework. Experiments were conducted using PPG and arterial BP signals from the UCI Open Blood Pressure Database. The best-performing configuration, based on continuous wavelet transform (CWT), achieved estimation errors of 3.80 &amp;amp;plusmn; 5.02 mmHg for systolic BP and 1.65 &amp;amp;plusmn; 2.70 mmHg for diastolic BP. Further real-world validation on 26 participants using an Omron cuff-based monitor as the reference showed good consistency, with correlation coefficients of R = 0.96 for SBP and R = 0.74 for DBP. The results demonstrate that appropriate signal representation, particularly CWT, plays a critical role in improving estimation accuracy and robustness, and may facilitate the development of wearable cuffless BP monitoring systems.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2847: A Comparative Study of Signal Representations Methods and Deep Learning Architectures for PPG-Based Cuffless Blood Pressure Estimation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2847">doi: 10.3390/s26092847</a></p>
	<p>Authors:
		Han Zhang
		Xudong Hu
		Xizhuang Zhang
		Zhencheng Chen
		Yongbo Liang
		Gang Wang
		</p>
	<p>Hypertension is a major risk factor for cardiovascular disease and requires effective long-term monitoring. Photoplethysmography (PPG), acquired from wearable optical sensors, offers a convenient and non-invasive signal source for cuffless blood pressure (BP) estimation, but existing studies have mainly emphasized model architecture optimization, with limited systematic investigation of signal representation. This study systematically compares seven one-dimensional-to-two-dimensional signal transformation methods and evaluates multiple architectural variants for PPG-based cuffless BP estimation under a unified framework. Experiments were conducted using PPG and arterial BP signals from the UCI Open Blood Pressure Database. The best-performing configuration, based on continuous wavelet transform (CWT), achieved estimation errors of 3.80 &amp;amp;plusmn; 5.02 mmHg for systolic BP and 1.65 &amp;amp;plusmn; 2.70 mmHg for diastolic BP. Further real-world validation on 26 participants using an Omron cuff-based monitor as the reference showed good consistency, with correlation coefficients of R = 0.96 for SBP and R = 0.74 for DBP. The results demonstrate that appropriate signal representation, particularly CWT, plays a critical role in improving estimation accuracy and robustness, and may facilitate the development of wearable cuffless BP monitoring systems.</p>
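	<p>A minimal illustrative sketch of the 1D-to-2D continuous wavelet transform (CWT) representation highlighted above, using PyWavelets with a Morlet wavelet; the sampling rate, scales, and synthetic PPG-like signal are illustrative, not the study's configuration.</p>
	<pre><code># Sketch: turn a 1D PPG segment into a 2D scalogram via the CWT.
import numpy as np
import pywt

fs = 125                                  # assumed sampling rate, Hz
t = np.arange(0, 8, 1 / fs)               # 8 s segment
ppg = np.sin(2 * np.pi * 1.2 * t) + 0.3 * np.sin(2 * np.pi * 2.4 * t)  # toy pulse

scales = np.arange(1, 65)
coeffs, freqs = pywt.cwt(ppg, scales, "morl", sampling_period=1 / fs)
scalogram = np.abs(coeffs)                # 2D image: scales x time
print(scalogram.shape)                    # (64, 1000), ready for a 2D CNN
</code></pre>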
	]]></content:encoded>

	<dc:title>A Comparative Study of Signal Representations Methods and Deep Learning Architectures for PPG-Based Cuffless Blood Pressure Estimation</dc:title>
			<dc:creator>Han Zhang</dc:creator>
			<dc:creator>Xudong Hu</dc:creator>
			<dc:creator>Xizhuang Zhang</dc:creator>
			<dc:creator>Zhencheng Chen</dc:creator>
			<dc:creator>Yongbo Liang</dc:creator>
			<dc:creator>Gang Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092847</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2847</prism:startingPage>
		<prism:doi>10.3390/s26092847</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2847</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2845">

	<title>Sensors, Vol. 26, Pages 2845: A Microfluidic Method for Simultaneous Assessment of Blood Viscosity and Red Blood Cell Aggregation During Continuous Syringe Delivery</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2845</link>
	<description>Accurate assessment of blood viscosity and red blood cell (RBC) aggregation under continuous flow is important for hemorheological analysis. However, simultaneous measurement remains challenging because both properties are influenced by flow conditions and RBC sedimentation. In this study, a microfluidic method is developed for the simultaneous measurement of blood viscosity and the RBC aggregation index (AI) during continuous blood delivery from a driving syringe. The proposed device consists of a viscosity-sensing channel for viscosity measurement and an aggregation-sensing channel for AI evaluation. The effects of flow rate, hematocrit, suspension medium, and syringe on&amp;amp;ndash;off operation are systematically investigated. Blood viscosity and AI are strongly affected by these factors, and transient flow interruption enhances RBC sedimentation in the syringe, thereby altering hemorheological properties. The proposed method is further used to evaluate thermally exposed RBCs, which exhibit reduced RBC aggregation and suppressed RBC sedimentation compared with control blood. At higher exposure temperatures and longer exposure times, blood viscosity and AI remain nearly constant over time, indicating a minimal contribution of damaged RBCs to RBC sedimentation. These results demonstrate that the proposed method enables reliable simultaneous evaluation of blood viscosity and RBC aggregation and may be useful for detecting functional alterations of RBCs under continuous-flow conditions.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2845: A Microfluidic Method for Simultaneous Assessment of Blood Viscosity and Red Blood Cell Aggregation During Continuous Syringe Delivery</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2845">doi: 10.3390/s26092845</a></p>
	<p>Authors:
		Yang Jun Kang
		</p>
	<p>Accurate assessment of blood viscosity and red blood cell (RBC) aggregation under continuous flow is important for hemorheological analysis. However, simultaneous measurement remains challenging because both properties are influenced by flow conditions and RBC sedimentation. In this study, a microfluidic method is developed for the simultaneous measurement of blood viscosity and the RBC aggregation index (AI) during continuous blood delivery from a driving syringe. The proposed device consists of a viscosity-sensing channel for viscosity measurement and an aggregation-sensing channel for AI evaluation. The effects of flow rate, hematocrit, suspension medium, and syringe on&amp;amp;ndash;off operation are systematically investigated. Blood viscosity and AI are strongly affected by these factors, and transient flow interruption enhances RBC sedimentation in the syringe, thereby altering hemorheological properties. The proposed method is further used to evaluate thermally exposed RBCs, which exhibit reduced RBC aggregation and suppressed RBC sedimentation compared with control blood. At higher exposure temperatures and longer exposure times, blood viscosity and AI remain nearly constant over time, indicating a minimal contribution of damaged RBCs to RBC sedimentation. These results demonstrate that the proposed method enables reliable simultaneous evaluation of blood viscosity and RBC aggregation and may be useful for detecting functional alterations of RBCs under continuous-flow conditions.</p>
	]]></content:encoded>

	<dc:title>A Microfluidic Method for Simultaneous Assessment of Blood Viscosity and Red Blood Cell Aggregation During Continuous Syringe Delivery</dc:title>
			<dc:creator>Yang Jun Kang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092845</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2845</prism:startingPage>
		<prism:doi>10.3390/s26092845</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2845</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2844">

	<title>Sensors, Vol. 26, Pages 2844: FPN-Based Faster R-CNN for Fiber Distributed Acoustic Sensing Intrusion Detection in High-Speed Railway</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2844</link>
	<description>With the rapid development of railway and intelligent transportation systems, the construction of security systems along high-speed railways has attracted increasing attention. In this paper, we propose a fiber distributed acoustic sensing (DAS) intrusion detection system to detect and identify intrusion events that threaten the operational safety of high-speed railways. First, we use the DAS system to collect optical fiber signals along the high-speed railway. We then slide a window over the optical fiber signals along the time axis to form intensity images carrying spatio-temporal signal features. After that, we propose a novel framework that integrates a feature pyramid network (FPN) with Faster R-CNN to extract features from the fiber signal intensity images, improving the detection and recognition rates of the system for high-speed railway intrusion events. Experimental results indicate that the system can identify five kinds of intrusion events. The average detection accuracy reaches 95.51%, and the F1 score of each intrusion event is above 93% on a real dataset. In addition, the system can identify the background noise interference generated by passing trains with a detection accuracy of 95%, which significantly reduces the false alarm rate.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2844: FPN-Based Faster R-CNN for Fiber Distributed Acoustic Sensing Intrusion Detection in High-Speed Railway</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2844">doi: 10.3390/s26092844</a></p>
	<p>Authors:
		Zhiguang Lei
		Zezheng Dong
		Hao Xu
		Xiao Xiao
		Xin’an Qiu
		</p>
	<p>With the rapid development of railway and intelligent transportation systems, the construction of security systems along high-speed railways has attracted increasing attention. In this paper, we propose a fiber distributed acoustic sensing (DAS) intrusion detection system to detect and identify intrusion events that threaten the operational safety of high-speed railways. First, we use the DAS system to collect optical fiber signals along the high-speed railway. We then slide a window over the optical fiber signals along the time axis to form intensity images carrying spatio-temporal signal features. After that, we propose a novel framework that integrates a feature pyramid network (FPN) with Faster R-CNN to extract features from the fiber signal intensity images, improving the detection and recognition rates of the system for high-speed railway intrusion events. Experimental results indicate that the system can identify five kinds of intrusion events. The average detection accuracy reaches 95.51%, and the F1 score of each intrusion event is above 93% on a real dataset. In addition, the system can identify the background noise interference generated by passing trains with a detection accuracy of 95%, which significantly reduces the false alarm rate.</p>
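	<p>A minimal illustrative sketch of the windowing step described above: a fixed-length window slid along the time axis of a multi-channel DAS recording yields 2D intensity images. Window length, stride, and array shapes are illustrative.</p>
	<pre><code># Sketch: slice a (channels x time) DAS array into intensity images.
import numpy as np

def slide_windows(das, win=256, stride=128):
    """Yield (channels x win) intensity images from a (channels x T) array."""
    _, T = das.shape
    for start in range(0, T - win + 1, stride):
        yield das[:, start:start + win]

rng = np.random.default_rng(1)
das = np.abs(rng.normal(size=(32, 2048)))  # toy spatio-temporal intensities
images = list(slide_windows(das))
print(len(images), images[0].shape)        # 15 windows of shape (32, 256)
</code></pre>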
	]]></content:encoded>

	<dc:title>FPN-Based Faster R-CNN for Fiber Distributed Acoustic Sensing Intrusion Detection in High-Speed Railway</dc:title>
			<dc:creator>Zhiguang Lei</dc:creator>
			<dc:creator>Zezheng Dong</dc:creator>
			<dc:creator>Hao Xu</dc:creator>
			<dc:creator>Xiao Xiao</dc:creator>
			<dc:creator>Xin’an Qiu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092844</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2844</prism:startingPage>
		<prism:doi>10.3390/s26092844</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2844</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2843">

	<title>Sensors, Vol. 26, Pages 2843: From Concept to Practice: Implementing a Knowledge-Driven Decision Support Platform for Sustainable Viticulture in Montenegro</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2843</link>
	<description>Viticulture is highly vulnerable to weather variability and climate change. Growers increasingly face risks associated with extreme weather events, water scarcity, and emerging pests and diseases. To address these challenges, this study presents the development and implementation of the first operational digital decision support platform (DSP) tailored to Montenegrin vineyards within the MONTEVITIS project. The platform integrates IoT sensor data, national meteorological records and high-resolution global climate datasets to provide real-time monitoring and climate projections for vineyard management. The system was piloted in four vineyards representing diverse microclimatic and soil conditions of Montenegro. Key functionalities include phenology, irrigation and disease alerts supported by a user-friendly dashboard, map-based visualisation tools and data export functions. The pilot deployment demonstrated that combining heterogeneous data streams increases the reliability of outputs and enables timely, site-specific recommendations. Challenges identified during implementation include connectivity limitations, gaps in data and variable levels of digital expertise among growers; however, lessons learned point to the importance of continuous stakeholder engagement and institutional support for sustained use. The MONTEVITIS experience demonstrates how digital agriculture tools can bridge tradition and innovation in viticulture. By fostering collaboration between growers, researchers and policy makers, the platform enables adaptive strategies for climate resilience and sustainable vineyard management. Although the platform has been successfully deployed and tested under pilot conditions, a comprehensive long-term validation of its performance and impact on vineyard decision-making remains part of ongoing future work.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2843: From Concept to Practice: Implementing a Knowledge-Driven Decision Support Platform for Sustainable Viticulture in Montenegro</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2843">doi: 10.3390/s26092843</a></p>
	<p>Authors:
		Tamara Racković
		Kruna Ratković
		Marko Simeunović
		Nataša Kovač
		Christoph Menz
		Helder Fraga
		Aureliano C. Malheiro
		António Fernandes
		João A. Santos
		</p>
	<p>Viticulture is highly vulnerable to weather variability and climate change. Growers increasingly face risks associated with extreme weather events, water scarcity, and emerging pests and diseases. To address these challenges, this study presents the development and implementation of the first operational digital decision support platform (DSP) tailored to Montenegrin vineyards within the MONTEVITIS project. The platform integrates IoT sensor data, national meteorological records and high-resolution global climate datasets to provide real-time monitoring and climate projections for vineyard management. The system was piloted in four vineyards representing diverse microclimatic and soil conditions of Montenegro. Key functionalities include phenology, irrigation and disease alerts supported by a user-friendly dashboard, map-based visualisation tools and data export functions. The pilot deployment demonstrated that combining heterogeneous data streams increases the reliability of outputs and enables timely, site-specific recommendations. Challenges identified during implementation include connectivity limitations, gaps in data and variable levels of digital expertise among growers; however, lessons learned point to the importance of continuous stakeholder engagement and institutional support for sustained use. The MONTEVITIS experience demonstrates how digital agriculture tools can bridge tradition and innovation in viticulture. By fostering collaboration between growers, researchers and policy makers, the platform enables adaptive strategies for climate resilience and sustainable vineyard management. Although the platform has been successfully deployed and tested under pilot conditions, a comprehensive long-term validation of its performance and impact on vineyard decision-making remains part of ongoing future work.</p>
	]]></content:encoded>

	<dc:title>From Concept to Practice: Implementing a Knowledge-Driven Decision Support Platform for Sustainable Viticulture in Montenegro</dc:title>
			<dc:creator>Tamara Racković</dc:creator>
			<dc:creator>Kruna Ratković</dc:creator>
			<dc:creator>Marko Simeunović</dc:creator>
			<dc:creator>Nataša Kovač</dc:creator>
			<dc:creator>Christoph Menz</dc:creator>
			<dc:creator>Helder Fraga</dc:creator>
			<dc:creator>Aureliano C. Malheiro</dc:creator>
			<dc:creator>António Fernandes</dc:creator>
			<dc:creator>João A. Santos</dc:creator>
		<dc:identifier>doi: 10.3390/s26092843</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2843</prism:startingPage>
		<prism:doi>10.3390/s26092843</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2843</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2842">

	<title>Sensors, Vol. 26, Pages 2842: Dynamic Transformer Based on Wavelet and Diffusion Prior Guidance for Cardiac Cine MRI Reconstruction</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2842</link>
	<description>Cardiac magnetic resonance imaging (CMR) is widely used for the diagnosis and functional assessment of cardiovascular diseases because of its noninvasive nature and excellent soft-tissue contrast. However, accelerated cine magnetic resonance imaging (cine MRI) acquisition usually relies on undersampling, which may lead to noise, aliasing artifacts, and detail loss in reconstructed images. To address this issue, we propose a wavelet-guided dynamic Transformer with diffusion priors for cardiac cine MRI reconstruction. Specifically, a diffusion model is introduced into a reduced latent feature space to generate high-frequency prior features with only 8 reverse sampling steps, thereby enhancing detail recovery while maintaining moderate computational cost. In addition, a wavelet-guided dynamic Transformer is designed to capture low-frequency structural information and temporal dependencies across adjacent frames. By combining wavelet-domain decomposition, diffusion priors, and dynamic spatiotemporal modeling, the proposed framework improves reconstruction quality while preserving temporal consistency. Experimental results on multiple cardiac cine MRI datasets show that the proposed method achieves superior reconstruction accuracy and temporal consistency over several competing approaches, while maintaining a favorable balance between computational efficiency and reconstruction performance. These findings indicate that the proposed framework is an effective and robust solution for accelerated cardiac cine MRI reconstruction.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2842: Dynamic Transformer Based on Wavelet and Diffusion Prior Guidance for Cardiac Cine MRI Reconstruction</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2842">doi: 10.3390/s26092842</a></p>
	<p>Authors:
		Bolun Zhao
		Jun Lyu
		</p>
	<p>Cardiac magnetic resonance imaging (CMR) is widely used for the diagnosis and functional assessment of cardiovascular diseases because of its noninvasive nature and excellent soft-tissue contrast. However, accelerated cine magnetic resonance imaging (cine MRI) acquisition usually relies on undersampling, which may lead to noise, aliasing artifacts, and detail loss in reconstructed images. To address this issue, we propose a wavelet-guided dynamic Transformer with diffusion priors for cardiac cine MRI reconstruction. Specifically, a diffusion model is introduced into a reduced latent feature space to generate high-frequency prior features with only 8 reverse sampling steps, thereby enhancing detail recovery while maintaining moderate computational cost. In addition, a wavelet-guided dynamic Transformer is designed to capture low-frequency structural information and temporal dependencies across adjacent frames. By combining wavelet-domain decomposition, diffusion priors, and dynamic spatiotemporal modeling, the proposed framework improves reconstruction quality while preserving temporal consistency. Experimental results on multiple cardiac cine MRI datasets show that the proposed method achieves superior reconstruction accuracy and temporal consistency over several competing approaches, while maintaining a favorable balance between computational efficiency and reconstruction performance. These findings indicate that the proposed framework is an effective and robust solution for accelerated cardiac cine MRI reconstruction.</p>
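	<p>As a minimal sketch of the wavelet-domain decomposition described above, the following splits one cine frame into a low-frequency approximation band and high-frequency detail bands using PyWavelets; the Haar wavelet is an illustrative choice, not necessarily the one used in the paper.</p>
	<pre><code># Sketch of the wavelet split (requires PyWavelets; 'haar' is illustrative).
import numpy as np
import pywt

frame = np.random.rand(128, 128)             # stand-in for one cine frame
cA, (cH, cV, cD) = pywt.dwt2(frame, 'haar')
# cA holds the low-frequency structure the dynamic Transformer models;
# cH, cV, cD hold the high-frequency detail the diffusion prior targets.
restored = pywt.idwt2((cA, (cH, cV, cD)), 'haar')
</code></pre>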
	]]></content:encoded>

	<dc:title>Dynamic Transformer Based on Wavelet and Diffusion Prior Guidance for Cardiac Cine MRI Reconstruction</dc:title>
			<dc:creator>Bolun Zhao</dc:creator>
			<dc:creator>Jun Lyu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092842</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2842</prism:startingPage>
		<prism:doi>10.3390/s26092842</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2842</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2841">

	<title>Sensors, Vol. 26, Pages 2841: RFD-BiSeNet V2: A Lightweight Floodwater Segmentation Network for Vision-Based Environmental Sensing</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2841</link>
	<description>Flood disasters pose significant threats to human life and infrastructure, creating an urgent need for reliable vision-based environmental sensing technologies for rapid floodwater identification. Vision-based platforms such as unmanned surface vehicles (USVs) provide an effective solution for monitoring inland water environments; however, accurate floodwater segmentation remains challenging due to complex water boundaries, reflections, and background interference. To address these issues, we propose RFD-BiSeNet V2, a lightweight semantic segmentation network. Building upon BiSeNet V2, our model integrates an edge-aware learning strategy to track dynamic contours, a feature refinement module to suppress reflection noise, and a multi-scale feature fusion module to accommodate varying morphological scales. Evaluated on a comprehensive dataset incorporating USV data, UAV imagery, and diverse real-world scenes, RFD-BiSeNet V2 achieves an mIoU of 97.10%, outperforming the baseline by 6.68%. Crucially, the results demonstrate the practical implications of our architectural advancements: the edge-aware and feature refinement modules successfully sharpen ambiguous water boundaries and effectively filter out severe surface reflections, directly driving the segmentation accuracy. With a compact size of 5.95M parameters and real-time inference capabilities, the model offers a robust and highly efficient solution suitable for resource-constrained deployments across diverse intelligent environmental sensing systems.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2841: RFD-BiSeNet V2: A Lightweight Floodwater Segmentation Network for Vision-Based Environmental Sensing</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2841">doi: 10.3390/s26092841</a></p>
	<p>Authors:
		Xinyan Li
		Yining Shi
		Sijie Wang
		Jinghui Xu
		</p>
	<p>Flood disasters pose significant threats to human life and infrastructure, creating an urgent need for reliable vision-based environmental sensing technologies for rapid floodwater identification. Vision-based platforms such as unmanned surface vehicles (USVs) provide an effective solution for monitoring inland water environments; however, accurate floodwater segmentation remains challenging due to complex water boundaries, reflections, and background interference. To address these issues, we propose RFD-BiSeNet V2, a lightweight semantic segmentation network. Building upon BiSeNet V2, our model integrates an edge-aware learning strategy to track dynamic contours, a feature refinement module to suppress reflection noise, and a multi-scale feature fusion module to accommodate varying morphological scales. Evaluated on a comprehensive dataset incorporating USV data, UAV imagery, and diverse real-world scenes, RFD-BiSeNet V2 achieves an mIoU of 97.10%, outperforming the baseline by 6.68%. Crucially, the results demonstrate the practical implications of our architectural advancements: the edge-aware and feature refinement modules successfully sharpen ambiguous water boundaries and effectively filter out severe surface reflections, directly driving the segmentation accuracy. With a compact size of 5.95M parameters and real-time inference capabilities, the model offers a robust and highly efficient solution suitable for resource-constrained deployments across diverse intelligent environmental sensing systems.</p>
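	<p>For reference, a minimal sketch of the mIoU metric quoted above, computed over the two classes (water and background) from boolean masks; this is the standard definition, not code from the paper.</p>
	<pre><code>import numpy as np

def iou(pred, target):
    """Intersection over union for boolean masks of equal shape."""
    union = np.logical_or(pred, target).sum()
    if union == 0:
        return 1.0
    return np.logical_and(pred, target).sum() / union

def miou(pred, target):
    """Mean IoU over the water and background classes."""
    return 0.5 * (iou(pred, target) + iou(~pred, ~target))
</code></pre>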
	]]></content:encoded>

	<dc:title>RFD-BiSeNet V2: A Lightweight Floodwater Segmentation Network for Vision-Based Environmental Sensing</dc:title>
			<dc:creator>Xinyan Li</dc:creator>
			<dc:creator>Yining Shi</dc:creator>
			<dc:creator>Sijie Wang</dc:creator>
			<dc:creator>Jinghui Xu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092841</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2841</prism:startingPage>
		<prism:doi>10.3390/s26092841</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2841</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2837">

	<title>Sensors, Vol. 26, Pages 2837: Ultra-Broadband and Compact Polarization Beam Splitter Based on a Hybrid Nodal&amp;ndash;Nodeless Dual Hollow-Core Anti-Resonant Fiber</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2837</link>
	<description>Hollow-core anti-resonant fibers (HC-ARFs) have emerged as a promising platform for next-generation optical systems, offering attractive advantages in low-latency, low-nonlinearity, and high-power handling. However, the development of high-performance functional components, such as polarization beam splitters (PBSs), within this platform faces a significant challenge: the simultaneous achievement of ultra-broad bandwidth, compact device length, high polarization selectivity, and strict single-mode operation remains elusive. To address this challenge, we propose and numerically investigate a novel dual hollow-core anti-resonant fiber (DHC-ARF) based on a hybrid nodal&amp;ndash;nodeless architecture. The design integrates three functional units: (1) an asymmetric nested semi-elliptical tube pair that defines the dual cores and serves as the primary wavelength-insensitive coupling channel; (2) nodeless nested circular tubes positioned peripherally to effectively suppress higher-order mode propagation while maintaining low fundamental mode loss; and (3) a selective localized thick-wall region that introduces a polarization-dependent perturbation to the x-polarized supermodes, whose observed behavior is physically consistent with a phase-mismatch effect associated with anti-crossing-like modal interaction near the target wavelength. Through synergistic optimization of these elements, we numerically demonstrate a combination of performance metrics. At the central wavelength of 1.55 &amp;micro;m, the coupling length for the y-polarization (Lcy) is reduced to 6.35 cm, while the coupling length ratio (CLR = Lcx/Lcy) equals 2.001, indicating effective polarization selectivity. Consequently, a device length of 12.7 cm is numerically demonstrated, which is comparable to or shorter than existing ultra-broadband DHC-ARF PBS designs. The proposed PBS is numerically shown to exhibit an ultra-broad bandwidth of 460 nm (spanning 1320 to 1780 nm) with a polarization extinction ratio better than 20 dB, peaking at 53 dB. Furthermore, the higher-order mode extinction ratio HOMER (&amp;lambda;) remains above 100 throughout the operating band and exceeds 200 over most of the band, indicating robust single-mode operation. This work not only presents a PBS design with competitive overall performance but also provides a versatile structural paradigm for developing functional components in hollow-core fiber-based integrated optical systems for high-speed communications and precision sensing. It should be noted that this work is based on numerical simulations, and experimental fabrication and validation will be pursued in future work.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2837: Ultra-Broadband and Compact Polarization Beam Splitter Based on a Hybrid Nodal&amp;ndash;Nodeless Dual Hollow-Core Anti-Resonant Fiber</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2837">doi: 10.3390/s26092837</a></p>
	<p>Authors:
		Zifan Wang
		Yifan Chen
		Hui Zou
		</p>
	<p>Hollow-core anti-resonant fibers (HC-ARFs) have emerged as a promising platform for next-generation optical systems, offering attractive advantages in low-latency, low-nonlinearity, and high-power handling. However, the development of high-performance functional components, such as polarization beam splitters (PBSs), within this platform faces a significant challenge: the simultaneous achievement of ultra-broad bandwidth, compact device length, high polarization selectivity, and strict single-mode operation remains elusive. To address this challenge, we propose and numerically investigate a novel dual hollow-core anti-resonant fiber (DHC-ARF) based on a hybrid nodal&amp;ndash;nodeless architecture. The design integrates three functional units: (1) an asymmetric nested semi-elliptical tube pair that defines the dual cores and serves as the primary wavelength-insensitive coupling channel; (2) nodeless nested circular tubes positioned peripherally to effectively suppress higher-order mode propagation while maintaining low fundamental mode loss; and (3) a selective localized thick-wall region that introduces a polarization-dependent perturbation to the x-polarized supermodes, whose observed behavior is physically consistent with a phase-mismatch effect associated with anti-crossing-like modal interaction near the target wavelength. Through synergistic optimization of these elements, we numerically demonstrate a combination of performance metrics. At the central wavelength of 1.55 &amp;micro;m, the coupling length for the y-polarization (Lcy) is reduced to 6.35 cm, while the coupling length ratio (CLR = Lcx/Lcy) equals 2.001, indicating effective polarization selectivity. Consequently, a device length of 12.7 cm is numerically demonstrated, which is comparable to or shorter than existing ultra-broadband DHC-ARF PBS designs. The proposed PBS is numerically shown to exhibit an ultra-broad bandwidth of 460 nm (spanning 1320 to 1780 nm) with a polarization extinction ratio better than 20 dB, peaking at 53 dB. Furthermore, the higher-order mode extinction ratio HOMER (&amp;lambda;) remains above 100 throughout the operating band and exceeds 200 over most of the band, indicating robust single-mode operation. This work not only presents a PBS design with competitive overall performance but also provides a versatile structural paradigm for developing functional components in hollow-core fiber-based integrated optical systems for high-speed communications and precision sensing. It should be noted that this work is based on numerical simulations, and experimental fabrication and validation will be pursued in future work.</p>
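	<p>The quoted figures are self-consistent: with CLR close to 2, a device two y-polarization coupling lengths long returns the y-polarization to its launch core while the x-polarization completes roughly one coupling length and crosses to the other core, which is what separates the two. A short arithmetic check:</p>
	<pre><code># Worked check of the numbers quoted in the abstract.
Lcy = 6.35                   # cm, y-polarization coupling length
CLR = 2.001                  # coupling length ratio Lcx / Lcy
Lcx = CLR * Lcy              # approx. 12.71 cm
device_length = 2 * Lcy
print(device_length)         # 12.7 cm, the reported device length
print(device_length / Lcx)   # approx. 0.9995 coupling lengths for x-pol
</code></pre>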
	]]></content:encoded>

	<dc:title>Ultra-Broadband and Compact Polarization Beam Splitter Based on a Hybrid Nodal&amp;ndash;Nodeless Dual Hollow-Core Anti-Resonant Fiber</dc:title>
			<dc:creator>Zifan Wang</dc:creator>
			<dc:creator>Yifan Chen</dc:creator>
			<dc:creator>Hui Zou</dc:creator>
		<dc:identifier>doi: 10.3390/s26092837</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2837</prism:startingPage>
		<prism:doi>10.3390/s26092837</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2837</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2840">

	<title>Sensors, Vol. 26, Pages 2840: Digital Twin of Coal Mine Rescue Robot&amp;mdash;Research on Intelligence and Visualization</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2840</link>
	<description>Mine disasters require urgent lifeline setup in confined tunnels, but manual rescue in unstable accident zones carries serious safety risks. Coal mine rescue robots (CMRRs) have become key equipment to replace manual rescue. However, traditional remote-controlled CMRRs suffer from low autonomy and weak environmental perception capability, which have become critical bottlenecks for field application. As an emerging technology in the mining field, the digital twin enables high-precision virtual-real mapping and on-site operation guidance, providing a novel solution to the above problems. To realize autonomous navigation and digital twin visualization of the CMRR, this paper first carries out targeted hardware retrofits on the CMRR platform, upgrading the environmental perception, communication transmission and motion control modules, and laying a solid hardware foundation for subsequent algorithm design and system implementation. Targeting the complex post-disaster underground environment, a digital twin-integrated CMRR system is constructed. For intelligent autonomous navigation, this study investigates a 3D point cloud&amp;ndash;based autonomous navigation framework and proposes a slope-fitting method as well as a maximum arrival probability obstacle avoidance method based on B&amp;eacute;zier curve trajectories. For environmental visualization, a digital twin interactive interface is built to monitor gas and other environmental parameters in real time, and to accurately reconstruct underground roadway structures from point cloud data. This design not only ensures the robot&amp;rsquo;s autonomous obstacle avoidance but also helps rescuers grasp underground conditions in advance. Field tests in a simulated post-disaster mine with complex terrain show that the system can stably complete autonomous navigation tasks, maintain stable motion control under dynamic interference, and provide accurate and reliable environmental data for rescue decisions, verifying its feasibility and effectiveness in harsh mine rescue scenarios.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2840: Digital Twin of Coal Mine Rescue Robot&amp;mdash;Research on Intelligence and Visualization</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2840">doi: 10.3390/s26092840</a></p>
	<p>Authors:
		Shaoze You
		Menggang Li
		Baolei Wu
		Jun Wang
		Chaoquan Tang
		</p>
	<p>Mine disasters require urgent lifeline setup in confined tunnels, but manual rescue in unstable accident zones carries serious safety risks. Coal mine rescue robots (CMRRs) have become key equipment to replace manual rescue. However, traditional remote-controlled CMRRs suffer from low autonomy and weak environmental perception capability, which have become critical bottlenecks for field application. As an emerging technology in the mining field, the digital twin enables high-precision virtual-real mapping and on-site operation guidance, providing a novel solution to the above problems. To realize autonomous navigation and digital twin visualization of the CMRR, this paper first carries out targeted hardware retrofits on the CMRR platform, upgrading the environmental perception, communication transmission and motion control modules, and laying a solid hardware foundation for subsequent algorithm design and system implementation. Targeting the complex post-disaster underground environment, a digital twin-integrated CMRR system is constructed. For intelligent autonomous navigation, this study investigates a 3D point cloud&amp;ndash;based autonomous navigation framework and proposes a slope-fitting method as well as a maximum arrival probability obstacle avoidance method based on B&amp;eacute;zier curve trajectories. For environmental visualization, a digital twin interactive interface is built to monitor gas and other environmental parameters in real time, and to accurately reconstruct underground roadway structures from point cloud data. This design not only ensures the robot&amp;rsquo;s autonomous obstacle avoidance but also helps rescuers grasp underground conditions in advance. Field tests in a simulated post-disaster mine with complex terrain show that the system can stably complete autonomous navigation tasks, maintain stable motion control under dynamic interference, and provide accurate and reliable environmental data for rescue decisions, verifying its feasibility and effectiveness in harsh mine rescue scenarios.</p>
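	<p>As a minimal sketch of the trajectory primitive the obstacle-avoidance method builds on, the following evaluates a cubic Bezier curve by de Casteljau subdivision; the control points are arbitrary illustrations, not values from the paper.</p>
	<pre><code>def bezier(p0, p1, p2, p3, t):
    """Point on a cubic Bezier curve at parameter t in [0, 1]."""
    def lerp(a, b, s):
        # Linear interpolation between points a and b.
        return tuple(ai + s * (bi - ai) for ai, bi in zip(a, b))
    a, b, c = lerp(p0, p1, t), lerp(p1, p2, t), lerp(p2, p3, t)
    d, e = lerp(a, b, t), lerp(b, c, t)
    return lerp(d, e, t)

# Sample an illustrative planar trajectory at 21 points.
path = [bezier((0, 0), (1, 2), (3, 2), (4, 0), i / 20) for i in range(21)]
</code></pre>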
	]]></content:encoded>

	<dc:title>Digital Twin of Coal Mine Rescue Robot&amp;mdash;Research on Intelligence and Visualization</dc:title>
			<dc:creator>Shaoze You</dc:creator>
			<dc:creator>Menggang Li</dc:creator>
			<dc:creator>Baolei Wu</dc:creator>
			<dc:creator>Jun Wang</dc:creator>
			<dc:creator>Chaoquan Tang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092840</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2840</prism:startingPage>
		<prism:doi>10.3390/s26092840</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2840</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2839">

	<title>Sensors, Vol. 26, Pages 2839: EACCO: Optimizing the Computation and Communication in Resource-Constrained IoT Devices for Energy-Efficient Swarm Robotics</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2839</link>
	<description>Energy consumption is a critical concern for Internet of Things (IoT) platforms lacking abundant resources, particularly for swarm robotic systems that rely on numerous devices operating collaboratively over extended periods. This study presents a comprehensive design strategy for improving processing and communication to enhance system efficiency and reduce energy consumption. We incorporate energy harvesting (photovoltaic and RF), dynamic power management, and energy-efficient communication protocols (e.g., duty cycling, power control, data compression) into two complementary platforms built for swarm robotics: MCU-based nodes (TI MSP430 with LoRa transceiver), which serve as the experimental prototype for validating energy-aware communication, compression, and scheduling mechanisms; and edge platforms (Jetson Nano and TX2), which are used for high-level power profiling and system-level evaluation, particularly for computation-intensive workloads and comparative analysis. Our technique involves analyzing the device&amp;rsquo;s energy usage and harvesting processes, developing efficient communication protocols, and validating the system through simulations and hardware prototypes. Experimental results under outdoor and indoor conditions show that the device maintains an energy neutrality ratio well above unity, even with limited ambient energy. Key findings include significant reductions in energy per bit transmitted and reliable long-term operation. These insights pave the way for deploying swarms of autonomous IoT-based robots with minimal maintenance and maximal longevity.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2839: EACCO: Optimizing the Computation and Communication in Resource-Constrained IoT Devices for Energy-Efficient Swarm Robotics</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2839">doi: 10.3390/s26092839</a></p>
	<p>Authors:
		Amir Ijaz
		Hashem Haghbayan
		Ethiopia Nigussie
		Abdul Malik
		Juha Plosila
		</p>
	<p>Energy consumption is a critical concern for Internet of Things (IoT) platforms lacking abundant resources, particularly for swarm robotic systems that rely on numerous devices operating collaboratively over extended periods. This study presents a comprehensive design strategy for improving processing and communication to enhance system efficiency and reduce energy consumption. We incorporate energy harvesting (photovoltaic and RF), dynamic power management, and energy-efficient communication protocols (e.g., duty cycling, power control, data compression) into two complementary platforms built for swarm robotics: MCU-based nodes (TI MSP430 with LoRa transceiver), which serve as the experimental prototype for validating energy-aware communication, compression, and scheduling mechanisms; and edge platforms (Jetson Nano and TX2), which are used for high-level power profiling and system-level evaluation, particularly for computation-intensive workloads and comparative analysis. Our technique involves analyzing the device&amp;rsquo;s energy usage and harvesting processes, developing efficient communication protocols, and validating the system through simulations and hardware prototypes. Experimental results under outdoor and indoor conditions show that the device maintains an energy neutrality ratio well above unity, even with limited ambient energy. Key findings include significant reductions in energy per bit transmitted and reliable long-term operation. These insights pave the way for deploying swarms of autonomous IoT-based robots with minimal maintenance and maximal longevity.</p>
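	<p>The two headline metrics reduce to simple ratios, sketched below with placeholder numbers (not measurements from the paper): an energy neutrality ratio above 1 means harvested energy covers consumption, and energy per bit normalizes transmission cost by payload size.</p>
	<pre><code># Placeholder figures, for illustration only.
harvested_j = 54.0               # energy harvested over a window (J)
consumed_j = 41.5                # energy consumed over the same window (J)
enr = harvested_j / consumed_j   # above 1.0: the node is energy-neutral

tx_energy_j = 0.082              # energy spent on one transmission (J)
payload_bits = 1840              # bits delivered in that transmission
energy_per_bit = tx_energy_j / payload_bits
print(enr, energy_per_bit)
</code></pre>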
	]]></content:encoded>

	<dc:title>EACCO: Optimizing the Computation and Communication in Resource-Constrained IoT Devices for Energy-Efficient Swarm Robotics</dc:title>
			<dc:creator>Amir Ijaz</dc:creator>
			<dc:creator>Hashem Haghbayan</dc:creator>
			<dc:creator>Ethiopia Nigussie</dc:creator>
			<dc:creator>Abdul Malik</dc:creator>
			<dc:creator>Juha Plosila</dc:creator>
		<dc:identifier>doi: 10.3390/s26092839</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2839</prism:startingPage>
		<prism:doi>10.3390/s26092839</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2839</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2838">

	<title>Sensors, Vol. 26, Pages 2838: A Biomimetic Tympanic Cavity PVDF Hydrophone for Low-Frequency Bioacoustic Monitoring in Marine Aquaculture</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2838</link>
	<description>Underwater acoustic monitoring is a critical technology for marine resource development and modern aquaculture. The performance of acoustic sensors directly determines the effectiveness of biological behavior tracking in complex marine environments. This paper presents the design, fabrication, and characterization of a custom hydrophone utilizing a polyvinylidene fluoride (PVDF) piezoelectric film configured in a biomimetic tympanic cavity structure. Operating on the direct piezoelectric effect, the device employs a pre-tensioned PVDF diaphragm integrated with a dedicated charge amplifier circuit to condition high-impedance signals. Laboratory calibrations demonstrate a stable frequency response (with a sensitivity variation within &amp;plusmn;1 dB) in the low-frequency range (1&amp;ndash;200 Hz) with an average acoustic pressure sensitivity of approximately &amp;minus;206 dB (re 1 V/&amp;mu;Pa), providing a higher relative voltage gain compared to a commercial reference hydrophone with a nominal sensitivity of &amp;minus;210 dB (re 1 V/&amp;mu;Pa). Furthermore, extensive field evaluations were conducted in a marine net pen to analyze acoustic data across multiple fish feeding scenarios (baseline, pre-feeding, active feeding, and post-feeding). The proposed custom hydrophone exhibited a superior dynamic range and successfully locked onto a 24.4 Hz Golden Pompano (Trachinotus blochii) bioacoustic signature, maintaining remarkable feature stability even after active feeding ceased. This study validates the efficacy of the biomimetic PVDF hydrophone for low-frequency acoustic detection, providing a robust hardware foundation for automated behavioral recognition systems in aquaculture.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2838: A Biomimetic Tympanic Cavity PVDF Hydrophone for Low-Frequency Bioacoustic Monitoring in Marine Aquaculture</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2838">doi: 10.3390/s26092838</a></p>
	<p>Authors:
		Tianyuan Hou
		Zhenming Piao
		Yuhang Wang
		Yi Xin
		</p>
	<p>Underwater acoustic monitoring is a critical technology for marine resource development and modern aquaculture. The performance of acoustic sensors directly determines the effectiveness of biological behavior tracking in complex marine environments. This paper presents the design, fabrication, and characterization of a custom hydrophone utilizing a polyvinylidene fluoride (PVDF) piezoelectric film configured in a biomimetic tympanic cavity structure. Operating on the direct piezoelectric effect, the device employs a pre-tensioned PVDF diaphragm integrated with a dedicated charge amplifier circuit to condition high-impedance signals. Laboratory calibrations demonstrate a stable frequency response (with a sensitivity variation within &amp;plusmn;1 dB) in the low-frequency range (1&amp;ndash;200 Hz) with an average acoustic pressure sensitivity of approximately &amp;minus;206 dB (re 1 V/&amp;mu;Pa), providing a higher relative voltage gain compared to a commercial reference hydrophone with a nominal sensitivity of &amp;minus;210 dB (re 1 V/&amp;mu;Pa). Furthermore, extensive field evaluations were conducted in a marine net pen to analyze acoustic data across multiple fish feeding scenarios (baseline, pre-feeding, active feeding, and post-feeding). The proposed custom hydrophone exhibited a superior dynamic range and successfully locked onto a 24.4 Hz Golden Pompano (Trachinotus blochii) bioacoustic signature, maintaining remarkable feature stability even after active feeding ceased. This study validates the efficacy of the biomimetic PVDF hydrophone for low-frequency acoustic detection, providing a robust hardware foundation for automated behavioral recognition systems in aquaculture.</p>
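	<p>The sensitivity comparison above reduces to standard decibel arithmetic: a 4 dB advantage in voltage sensitivity corresponds to roughly a 1.58-fold voltage gain, as the short check below shows.</p>
	<pre><code># Worked check of the relative voltage gain between the two sensitivities.
custom_db = -206.0      # dB re 1 V/uPa, proposed hydrophone
reference_db = -210.0   # dB re 1 V/uPa, commercial reference
gain = 10 ** ((custom_db - reference_db) / 20.0)
print(gain)             # approx. 1.585
</code></pre>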
	]]></content:encoded>

	<dc:title>A Biomimetic Tympanic Cavity PVDF Hydrophone for Low-Frequency Bioacoustic Monitoring in Marine Aquaculture</dc:title>
			<dc:creator>Tianyuan Hou</dc:creator>
			<dc:creator>Zhenming Piao</dc:creator>
			<dc:creator>Yuhang Wang</dc:creator>
			<dc:creator>Yi Xin</dc:creator>
		<dc:identifier>doi: 10.3390/s26092838</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2838</prism:startingPage>
		<prism:doi>10.3390/s26092838</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2838</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2836">

	<title>Sensors, Vol. 26, Pages 2836: Development of a Highly Sensitive Analytical System for Measuring 17&amp;beta;-Estradiol Using Fluorescent Molecular Probes</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2836</link>
	<description>Easier measurement of 17&amp;beta;-estradiol could promote the early diagnosis and treatment of medical conditions in women. In this study, we developed a fluorescence-based assay using a nucleic acid aptamer labeled with a fluorescent dye for the detection of estrogen. Upon binding to 17&amp;beta;-estradiol, the aptamer undergoes a conformational change, resulting in a measurable change in fluorescence intensity. The assay enables rapid detection within 30 min, with a limit of detection of 0.2 pg/mL and a linear dynamic range of 1 pg/mL&amp;ndash;1000 pg/mL. High selectivity toward 17&amp;beta;-estradiol was confirmed against structurally related compounds. The method was successfully applied to human saliva samples, demonstrating high sensitivity, precision, and reproducibility with recoveries of 98.8% and coefficients of variation below 3.0%. In addition, a compact desktop fluorescence detector was developed, allowing direct measurement in polymerase chain reaction tubes without sample transfer, thereby simplifying the procedure and minimizing sample loss. These results demonstrate that the proposed system provides a simple and practical platform for estrogen detection in biological samples and has potential applications in clinical and research settings.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2836: Development of a Highly Sensitive Analytical System for Measuring 17&amp;beta;-Estradiol Using Fluorescent Molecular Probes</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2836">doi: 10.3390/s26092836</a></p>
	<p>Authors:
		Yoshio Suzuki
		</p>
	<p>Easier measurement of 17&amp;beta;-estradiol could promote the early diagnosis and treatment of medical conditions in women. In this study, we developed a fluorescence-based assay using a nucleic acid aptamer labeled with a fluorescent dye for the detection of estrogen. Upon binding to 17&amp;beta;-estradiol, the aptamer undergoes a conformational change, resulting in a measurable change in fluorescence intensity. The assay enables rapid detection within 30 min, with a limit of detection of 0.2 pg/mL and a linear dynamic range of 1 pg/mL&amp;ndash;1000 pg/mL. High selectivity toward 17&amp;beta;-estradiol was confirmed against structurally related compounds. The method was successfully applied to human saliva samples, demonstrating high sensitivity, precision, and reproducibility with recoveries of 98.8% and coefficients of variation below 3.0%. In addition, a compact desktop fluorescence detector was developed, allowing direct measurement in polymerase chain reaction tubes without sample transfer, thereby simplifying the procedure and minimizing sample loss. These results demonstrate that the proposed system provides a simple and practical platform for estrogen detection in biological samples and has potential applications in clinical and research settings.</p>
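	<p>A minimal calibration sketch of the kind such an assay relies on: fit fluorescence intensity against log concentration over the quoted linear range and invert the fit to estimate unknowns. The data points below are hypothetical; only the 1 to 1000 pg/mL range comes from the abstract.</p>
	<pre><code>import numpy as np

# Hypothetical calibration points spanning the quoted linear range.
conc = np.array([1.0, 10.0, 100.0, 1000.0])    # pg/mL
signal = np.array([0.12, 0.31, 0.52, 0.74])    # fluorescence (a.u.)
slope, intercept = np.polyfit(np.log10(conc), signal, 1)

def estimate_conc(s):
    """Invert the log-linear fit to estimate concentration (pg/mL)."""
    return 10 ** ((s - intercept) / slope)
</code></pre>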
	]]></content:encoded>

	<dc:title>Development of a Highly Sensitive Analytical System for Measuring 17&amp;beta;-Estradiol Using Fluorescent Molecular Probes</dc:title>
			<dc:creator>Yoshio Suzuki</dc:creator>
		<dc:identifier>doi: 10.3390/s26092836</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2836</prism:startingPage>
		<prism:doi>10.3390/s26092836</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2836</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2833">

	<title>Sensors, Vol. 26, Pages 2833: Hierarchical Deep Learning Framework for Skin Disease and Cancer Classification Performance Enhancement</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2833</link>
	<description>The number of people investigated for skin cancer has increased significantly worldwide. For preliminary diagnosis, dermatologists typically inspect skin lesions visually for abnormalities; however, this requires expert judgement, and the visual similarity of some lesions remains challenging. This study aimed to address the challenge of classifying multiple images of skin conditions, covering both Benign and Malignant groups, using a hierarchical method. Instead of directly performing multi-class classification with a single model, multiple binary classification models were organized to reduce task complexity and improve overall performance. In the methodology, four convolutional neural network (CNN) models, namely MobileNetV2, EfficientNet-B0, ResNet-18, and ResNet-50, were selected as candidates for this problem. The proposed hierarchical binary classification model was evaluated against conventional multi-class classification methods. Various evaluation metrics were used to assess model performance, with recall as the primary metric in this study, given the emphasis on minimizing false negatives. However, some results revealed discrepancies between the highest recall and other performance metrics. Further analysis demonstrated the potential of using recall as a selection criterion for identifying the most suitable CNN models. The single-model classification of six classes of skin lesion images achieves the highest recall of 60.27% with MobileNetV2. Meanwhile, the proposed hierarchical model achieves a higher recall of 82.62%, an increase of 22.35 percentage points. Additionally, improvements were observed across all other evaluation metrics, including accuracy (+25.46%), precision (+17.21%), F1-score (+21.34%), balanced accuracy (+12.69%), specificity (+3.03%), and G-mean (+14.25%). These improvements indicate enhanced performance in correctly identifying both positive and negative cases while reducing misclassification rates. This outcome demonstrates the potential to improve the model&amp;rsquo;s generalizability, thereby increasing its applicability across various clinical decision-support systems.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2833: Hierarchical Deep Learning Framework for Skin Disease and Cancer Classification Performance Enhancement</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2833">doi: 10.3390/s26092833</a></p>
	<p>Authors:
		Chanapa Chaitan
		Sasithorn Tengjongdee
		Suejit Pechprasarn
		Kitsada Thadson
		</p>
	<p>The number of people investigated for skin cancer has increased significantly worldwide. For preliminary diagnosis, dermatologists typically inspect skin lesions visually for abnormalities; however, this requires expert judgement, and the visual similarity of some lesions remains challenging. This study aimed to address the challenge of classifying multiple images of skin conditions, covering both Benign and Malignant groups, using a hierarchical method. Instead of directly performing multi-class classification with a single model, multiple binary classification models were organized to reduce task complexity and improve overall performance. In the methodology, four convolutional neural network (CNN) models, namely MobileNetV2, EfficientNet-B0, ResNet-18, and ResNet-50, were selected as candidates for this problem. The proposed hierarchical binary classification model was evaluated against conventional multi-class classification methods. Various evaluation metrics were used to assess model performance, with recall as the primary metric in this study, given the emphasis on minimizing false negatives. However, some results revealed discrepancies between the highest recall and other performance metrics. Further analysis demonstrated the potential of using recall as a selection criterion for identifying the most suitable CNN models. The single-model classification of six classes of skin lesion images achieves the highest recall of 60.27% with MobileNetV2. Meanwhile, the proposed hierarchical model achieves a higher recall of 82.62%, an increase of 22.35 percentage points. Additionally, improvements were observed across all other evaluation metrics, including accuracy (+25.46%), precision (+17.21%), F1-score (+21.34%), balanced accuracy (+12.69%), specificity (+3.03%), and G-mean (+14.25%). These improvements indicate enhanced performance in correctly identifying both positive and negative cases while reducing misclassification rates. This outcome demonstrates the potential to improve the model&amp;rsquo;s generalizability, thereby increasing its applicability across various clinical decision-support systems.</p>
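	<p>Structurally, the hierarchical idea replaces one six-way model with a tree of binary decisions, as in the sketch below; the classifier roles and the two-level tree are illustrative, not the paper's exact hierarchy.</p>
	<pre><code>def classify(image, is_malignant, malignant_subtype, benign_subtype):
    """Route an image through binary classifiers instead of one
    six-way model; each argument after image is a trained model."""
    if is_malignant(image):
        return malignant_subtype(image)   # finer-grained malignant label
    return benign_subtype(image)          # finer-grained benign label
</code></pre>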
	]]></content:encoded>

	<dc:title>Hierarchical Deep Learning Framework for Skin Disease and Cancer Classification Performance Enhancement</dc:title>
			<dc:creator>Chanapa Chaitan</dc:creator>
			<dc:creator>Sasithorn Tengjongdee</dc:creator>
			<dc:creator>Suejit Pechprasarn</dc:creator>
			<dc:creator>Kitsada Thadson</dc:creator>
		<dc:identifier>doi: 10.3390/s26092833</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2833</prism:startingPage>
		<prism:doi>10.3390/s26092833</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2833</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2835">

	<title>Sensors, Vol. 26, Pages 2835: YOLOv11-SMS: An Improved Algorithm for Impurity Detection in Seed Cotton</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2835</link>
	<description>To enhance the precision of cottonseed impurity detection and address issues such as high miss-detection rates and suboptimal performance, this paper introduces an improved YOLOv11 algorithm, termed YOLOv11-SMS. Initially, the algorithm integrates a local self-attention mechanism (LRSA) to design the C2PSA-SL module, which augments the model&amp;rsquo;s ability to learn local information while maintaining global feature awareness. Furthermore, the feature extraction stage and the network head incorporate a multi-branch reparameterized convolution (MBRConv) module, enhancing feature extraction capabilities while preserving the model&amp;rsquo;s lightweight properties. Lastly, a spatial adaptive modulation (SAFM) module is introduced to optimize the detection of small targets. Experimental results demonstrate that YOLOv11-SMS outperforms the baseline model, with mAP@50&amp;ndash;95 increasing from 79.42% to 82.49%, an improvement of 3.07 percentage points. The average mIOU increased from 90.98% to 94.18%, representing a 3.2 percentage point improvement. Moreover, the model achieves an impressive real-time inference speed of 178.63 frames per second (FPS), effectively balancing detection accuracy and speed, offering an efficient and precise solution for cottonseed impurity detection.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2835: YOLOv11-SMS: An Improved Algorithm for Impurity Detection in Seed Cotton</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2835">doi: 10.3390/s26092835</a></p>
	<p>Authors:
		Wenyan Yuan
		Laigang Zhang
		Donghe Wang
		Zhijun Guo
		</p>
	<p>To enhance the precision of cottonseed impurity detection and address issues such as high miss-detection rates and suboptimal performance, this paper introduces an improved YOLOv11 algorithm, termed YOLOv11-SMS. Initially, the algorithm integrates a local self-attention mechanism (LRSA) to design the C2PSA-SL module, which augments the model&amp;rsquo;s ability to learn local information while maintaining global feature awareness. Furthermore, the feature extraction stage and the network head incorporate a multi-branch reparameterized convolution (MBRConv) module, enhancing feature extraction capabilities while preserving the model&amp;rsquo;s lightweight properties. Lastly, a spatial adaptive modulation (SAFM) module is introduced to optimize the detection of small targets. Experimental results demonstrate that YOLOv11-SMS outperforms the baseline model, with mAP@50&amp;ndash;95 increasing from 79.42% to 82.49%, an improvement of 3.07 percentage points. The average mIOU increased from 90.98% to 94.18%, representing a 3.2 percentage point improvement. Moreover, the model achieves an impressive real-time inference speed of 178.63 frames per second (FPS), effectively balancing detection accuracy and speed, offering an efficient and precise solution for cottonseed impurity detection.</p>
	]]></content:encoded>

	<dc:title>YOLOv11-SMS: An Improved Algorithm for Impurity Detection in Seed Cotton</dc:title>
			<dc:creator>Wenyan Yuan</dc:creator>
			<dc:creator>Laigang Zhang</dc:creator>
			<dc:creator>Donghe Wang</dc:creator>
			<dc:creator>Zhijun Guo</dc:creator>
		<dc:identifier>doi: 10.3390/s26092835</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2835</prism:startingPage>
		<prism:doi>10.3390/s26092835</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2835</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2834">

	<title>Sensors, Vol. 26, Pages 2834: Wireless High Rotational Speed Assessment by Exploiting an RF Sensor Tag System and Equivalent-Time Reconstruction</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2834</link>
	<description>Rotational speed monitoring is essential in many industrial and electromechanical systems. This paper presents a rotational speed measurement method based on a wireless impedance sensing system leveraging the radio-frequency coupling between a passive resonant tag and a coplanar waveguide (CPW) probe. The sensing mechanism exploits periodic variations in the real part of the probe impedance caused by the relative alignment between the rotating tag and the stationary probe. While the impedance signal is inherently periodic, the usable speed range of sampling-based measurement systems is fundamentally constrained by their acquisition rate. To overcome this limitation without requiring higher-rate instrumentation, an equivalent-time sampling (ETS) reconstruction approach is proposed. Sparse and nonuniform impedance samples collected over multiple revolutions are mapped into an equivalent phase domain and combined to reconstruct the waveform associated with a single rotation period. The method is reader-agnostic in principle, as it only requires time-stamped monitoring of a periodic RF observable at a selected frequency; however, experimental validation in this work is performed using a vector network analyzer (VNA). Experimental results obtained on a rotating platform with speeds ranging from 150 RPM to 4000 RPM demonstrate that the proposed method reduces the mean relative estimation error to below 5% across the full range, compared to errors exceeding 70% for conventional peak-based estimation above 1000 RPM. These results highlight the effectiveness of the ETS approach in extending the operational range of RF impedance-based rotational sensing under severe undersampling conditions. The proposed framework is generalizable to other periodic RF sensing configurations where signal periodicity can be exploited across multiple acquisition cycles.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2834: Wireless High Rotational Speed Assessment by Exploiting an RF Sensor Tag System and Equivalent-Time Reconstruction</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2834">doi: 10.3390/s26092834</a></p>
	<p>Authors:
		Armin Gharibi
		Filippo Costa
		Simone Genovesi
		</p>
	<p>Rotational speed monitoring is essential in many industrial and electromechanical systems. This paper presents a rotational speed measurement method based on a wireless impedance sensing system leveraging the radio-frequency coupling between a passive resonant tag and a coplanar waveguide (CPW) probe. The sensing mechanism exploits periodic variations in the real part of the probe impedance caused by the relative alignment between the rotating tag and the stationary probe. While the impedance signal is inherently periodic, the usable speed range of sampling-based measurement systems is fundamentally constrained by their acquisition rate. To overcome this limitation without requiring higher-rate instrumentation, an equivalent-time sampling (ETS) reconstruction approach is proposed. Sparse and nonuniform impedance samples collected over multiple revolutions are mapped into an equivalent phase domain and combined to reconstruct the waveform associated with a single rotation period. The method is reader-agnostic in principle, as it only requires time-stamped monitoring of a periodic RF observable at a selected frequency; however, experimental validation in this work is performed using a vector network analyzer (VNA). Experimental results obtained on a rotating platform with speeds ranging from 150 RPM to 4000 RPM demonstrate that the proposed method reduces the mean relative estimation error to below 5% across the full range, compared to errors exceeding 70% for conventional peak-based estimation above 1000 RPM. These results highlight the effectiveness of the ETS approach in extending the operational range of RF impedance-based rotational sensing under severe undersampling conditions. The proposed framework is generalizable to other periodic RF sensing configurations where signal periodicity can be exploited across multiple acquisition cycles.</p>
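	<p>The core of the equivalent-time idea is folding timestamps from many revolutions onto one phase axis, as in the minimal sketch below; in practice the rotation period itself is the unknown being estimated, typically by sweeping candidate periods and scoring the coherence of the folded waveform.</p>
	<pre><code>import numpy as np

def fold_to_phase(timestamps, values, rot_period):
    """Map sparse samples from many revolutions into one phase domain
    (phase in [0, 1) of a single rotation of the assumed period)."""
    phase = np.mod(np.asarray(timestamps), rot_period) / rot_period
    order = np.argsort(phase)
    return phase[order], np.asarray(values)[order]
</code></pre>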
	]]></content:encoded>

	<dc:title>Wireless High Rotational Speed Assessment by Exploiting an RF Sensor Tag System and Equivalent-Time Reconstruction</dc:title>
			<dc:creator>Armin Gharibi</dc:creator>
			<dc:creator>Filippo Costa</dc:creator>
			<dc:creator>Simone Genovesi</dc:creator>
		<dc:identifier>doi: 10.3390/s26092834</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2834</prism:startingPage>
		<prism:doi>10.3390/s26092834</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2834</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2832">

	<title>Sensors, Vol. 26, Pages 2832: Governing Privacy-Preserving Face Recognition in Transport Infrastructures: A Comprehensive Review</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2832</link>
	<description>Face recognition technologies are increasingly deployed in transport infrastructures to improve efficiency and security, but they raise significant privacy and data protection concerns. This study reviews how privacy-preserving face recognition techniques can address these challenges in real-world settings. Using a systematic literature review approach, the paper analyses research across technical, operational, and governance perspectives. The findings show that while advanced methods such as encryption, federated learning, and de-identification can reduce data exposure, they are rarely implemented in operational systems, which tend to prioritize performance and scalability. At the same time, governance-focused studies emphasize issues such as proportionality, accountability, and fundamental rights, often without clear links to technical solutions. Overall, the review highlights a fragmented landscape and a gap between research and practice, underscoring the need for integrated approaches that align privacy-preserving techniques with practical deployment constraints and regulatory requirements.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2832: Governing Privacy-Preserving Face Recognition in Transport Infrastructures: A Comprehensive Review</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2832">doi: 10.3390/s26092832</a></p>
	<p>Authors:
		Eva María Benito Sanz
		Alba Gonzalo Gonzalo Primo
		Gaurav Choudhary
		Nicola Dragoni
		</p>
	<p>Face recognition technologies are increasingly deployed in transport infrastructures to improve efficiency and security, but they raise significant privacy and data protection concerns. This study reviews how privacy-preserving face recognition techniques can address these challenges in real-world settings. Using a systematic literature review approach, the paper analyses research across technical, operational, and governance perspectives. The findings show that while advanced methods such as encryption, federated learning, and de-identification can reduce data exposure, they are rarely implemented in operational systems, which tend to prioritize performance and scalability. At the same time, governance-focused studies emphasize issues such as proportionality, accountability, and fundamental rights, often without clear links to technical solutions. Overall, the review highlights a fragmented landscape and a gap between research and practice, underscoring the need for integrated approaches that align privacy-preserving techniques with practical deployment constraints and regulatory requirements.</p>
	]]></content:encoded>

	<dc:title>Governing Privacy-Preserving Face Recognition in Transport Infrastructures: A Comprehensive Review</dc:title>
			<dc:creator>Eva María Benito Sanz</dc:creator>
			<dc:creator>Alba Gonzalo Gonzalo Primo</dc:creator>
			<dc:creator>Gaurav Choudhary</dc:creator>
			<dc:creator>Nicola Dragoni</dc:creator>
		<dc:identifier>doi: 10.3390/s26092832</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2832</prism:startingPage>
		<prism:doi>10.3390/s26092832</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2832</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2831">

	<title>Sensors, Vol. 26, Pages 2831: ACO-CLS: Ant Colony Optimization-Based Collaborative Localization and Search for Multi-Robot Systems</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2831</link>
	<description>With the rapid development of robot technology, multi-robot cooperative systems have been widely used in rescue, monitoring, logistics, and other fields. To address the key problems of multi-robot cooperative localization and target search while accounting for search time, search mileage, and search risk, a cooperative localization and search algorithm based on ant colony optimization (ACO-CLS) is proposed, informed by an analysis of the target weight factor, sensitivity to the number of robots, adaptability of robot formation, and sensitivity to robot speed. First, a multi-sensor fusion localization algorithm based on IMU and UWB sensors is designed, using an error-state Kalman filter (ESKF) to achieve high-precision position estimation. Second, a weight-based dynamic grouping strategy is proposed to realize intelligent grouping according to target priority and robot position. Then, the ant colony algorithm is introduced for path decisions, guiding the robot search through pheromone updates and heuristic information. Finally, an intelligent reallocation mechanism triggered by target discovery is designed to dynamically optimize resource allocation. Simulation results show that the proposed algorithm outperforms traditional methods in localization accuracy, search efficiency, and system robustness, and has clear theoretical value and application prospects.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2831: ACO-CLS: Ant Colony Optimization-Based Collaborative Localization and Search for Multi-Robot Systems</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2831">doi: 10.3390/s26092831</a></p>
	<p>Authors:
		Zhengyang He
		Xiaojie Tang
		Fengyun Zhang
		</p>
	<p>With the rapid development of robot technology, multi-robot cooperative systems have been widely used in rescue, monitoring, logistics, and other fields. To address the key problems of multi-robot cooperative localization and target search while accounting for search time, search mileage, and search risk, a cooperative localization and search algorithm based on ant colony optimization (ACO-CLS) is proposed, informed by an analysis of the target weight factor, sensitivity to the number of robots, adaptability of robot formation, and sensitivity to robot speed. First, a multi-sensor fusion localization algorithm based on IMU and UWB sensors is designed, using an error-state Kalman filter (ESKF) to achieve high-precision position estimation. Second, a weight-based dynamic grouping strategy is proposed to realize intelligent grouping according to target priority and robot position. Then, the ant colony algorithm is introduced for path decisions, guiding the robot search through pheromone updates and heuristic information. Finally, an intelligent reallocation mechanism triggered by target discovery is designed to dynamically optimize resource allocation. Simulation results show that the proposed algorithm outperforms traditional methods in localization accuracy, search efficiency, and system robustness, and has clear theoretical value and application prospects.</p>
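	<p>For orientation, the pheromone machinery that the path-decision step relies on follows the standard ant colony update (evaporation plus cost-weighted deposit), sketched generically below; rho and Q are standard ACO parameters, not values from the paper.</p>
	<pre><code>def update_pheromone(tau, ant_paths, path_costs, rho=0.1, Q=1.0):
    """tau: dict mapping edge to pheromone level.
    Evaporate all edges, then deposit Q/cost along each ant's path."""
    for edge in tau:
        tau[edge] *= (1.0 - rho)
    for path, cost in zip(ant_paths, path_costs):
        for edge in path:
            tau[edge] = tau.get(edge, 0.0) + Q / cost
    return tau
</code></pre>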
	]]></content:encoded>

	<dc:title>ACO-CLS: Ant Colony Optimization-Based Collaborative Localization and Search for Multi-Robot Systems</dc:title>
			<dc:creator>Zhengyang He</dc:creator>
			<dc:creator>Xiaojie Tang</dc:creator>
			<dc:creator>Fengyun Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092831</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2831</prism:startingPage>
		<prism:doi>10.3390/s26092831</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2831</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2829">

	<title>Sensors, Vol. 26, Pages 2829: Two-Dimensional DOA Estimation of Coherent Sources Based on Uniform Linear Electromagnetic Vector Sensor Array</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2829</link>
	<description>In this paper, the two-dimensional DOA estimation problem of coherent signals in an electromagnetic vector sensor (EMVS) array is studied. A new decorrelation method is proposed by extending the multiple-Toeplitz matrices reconstruction (MTOEP) method to a polarization-sensitive array. After that, a closed-form solution is derived based on the ESPRIT algorithm for DOA and polarization parameter estimation. Parameter pairing is based on the correspondence between eigenvectors obtained by independent eigen-decompositions. Simulation results verify the effectiveness of the proposed work and show that the proposed algorithm outperforms the traditional spatial smoothing (SS) method, especially in scenarios with low signal-to-noise ratios (SNRs) and small angular separations.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2829: Two-Dimensional DOA Estimation of Coherent Sources Based on Uniform Linear Electromagnetic Vector Sensor Array</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2829">doi: 10.3390/s26092829</a></p>
	<p>Authors:
		Jingxiang Zhang
		Xiang Lan
		Xianpeng Wang
		</p>
	<p>In this paper, the two-dimensional DOA estimation problem of coherent signals in an electromagnetic vector sensor (EMVS) array is studied. A new decorrelation method is proposed by extending the multiple-Toeplitz matrices reconstruction (MTOEP) method to a polarization-sensitive array. After that, a closed-form solution is derived based on the ESPRIT algorithm for DOA and polarization parameter estimation. Parameter pairing is based on the correspondence between eigenvectors obtained by independent eigen-decompositions. Simulation results verify the effectiveness of the proposed work and show that the proposed algorithm outperforms the traditional spatial smoothing (SS) method, especially in scenarios with low signal-to-noise ratios (SNRs) and small angular separations.</p>
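	<p>The rotational-invariance step underlying a closed-form ESPRIT solution can be sketched on a plain uniform linear array. The sketch below assumes uncorrelated sources on a half-wavelength-spaced scalar array, so it omits the polarization-sensitive extension and the Toeplitz decorrelation that this paper contributes; array size, SNR, and angles are illustrative.</p>
	<pre><code>
# Generic ESPRIT on a uniform linear array (half-wavelength spacing).
# Uncorrelated sources here; the paper's decorrelation step is what
# makes the approach work for coherent ones.
import numpy as np

rng = np.random.default_rng(0)
M, N, snr_db = 8, 200, 20
true_deg = np.array([-10.0, 25.0])

n_idx = np.arange(M)[:, None]
A = np.exp(1j * np.pi * n_idx * np.sin(np.deg2rad(true_deg)))  # (M, K)
S = rng.standard_normal((2, N)) + 1j * rng.standard_normal((2, N))
X = A @ S
noise = rng.standard_normal(X.shape) + 1j * rng.standard_normal(X.shape)
X = X + noise * 10 ** (-snr_db / 20)

R = X @ X.conj().T / N
_, eigvec = np.linalg.eigh(R)                # eigenvalues ascending
Es = eigvec[:, -2:]                          # signal subspace (K = 2)
Psi = np.linalg.pinv(Es[:-1]) @ Es[1:]       # rotational invariance
phases = np.angle(np.linalg.eigvals(Psi))
est_deg = np.rad2deg(np.arcsin(phases / np.pi))
print(np.sort(est_deg))                      # close to [-10, 25]
</code></pre>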
	]]></content:encoded>

	<dc:title>Two-Dimensional DOA Estimation of Coherent Sources Based on Uniform Linear Electromagnetic Vector Sensor Array</dc:title>
			<dc:creator>Jingxiang Zhang</dc:creator>
			<dc:creator>Xiang Lan</dc:creator>
			<dc:creator>Xianpeng Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092829</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2829</prism:startingPage>
		<prism:doi>10.3390/s26092829</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2829</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2830">

	<title>Sensors, Vol. 26, Pages 2830: Uncertainty Analysis and Metrological Validation of Raman Distributed Temperature Measurements in a Full-Scale Test Facility</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2830</link>
	<description>Raman Distributed Temperature Sensing (DTS) provides spatially distributed temperature measurements along optical fibers and is increasingly used for monitoring large-scale infrastructures and experimental facilities, enabling three-dimensional reconstruction of temperature fields. However, such measurements involve specific implementation constraints and may be affected by significant errors, with uncertainties influenced by factors such as calibration, environmental conditions, spatial resolution effects, and fiber positioning. Ensuring the metrological validity of Raman-based DTS measurements therefore requires a rigorous quantification of the associated measurement uncertainties. In this work, a complete uncertainty analysis of Raman-based DTS measurements is performed following the principles of the Guide to the Expression of Uncertainty in Measurement (GUM). A measurement model describing the relationship between Raman backscattered signals and temperature is established, and all relevant uncertainty sources are identified and quantified. The methodology is applied to a full-scale experimental facility equipped with a DTS interrogator and a dedicated calibration setup. Uncertainty propagation is performed using both first-order Taylor series expansion and Monte Carlo simulation, providing consistent results. The analysis shows that calibration uncertainty, spatial dispersion of the temperature field and fiber positioning within the reconstructed temperature field represent the dominant contributions to the combined uncertainty. The proposed approach provides a rigorous framework for the metrological qualification of Raman DTS systems and offers practical guidance for improving measurement reliability in distributed temperature monitoring applications.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2830: Uncertainty Analysis and Metrological Validation of Raman Distributed Temperature Measurements in a Full-Scale Test Facility</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2830">doi: 10.3390/s26092830</a></p>
	<p>Authors:
		Maxime Houvin
		Rafik Moulouel
		Pascal Borel
		Didier Boldo
		</p>
	<p>Raman Distributed Temperature Sensing (DTS) provides spatially distributed temperature measurements along optical fibers and is increasingly used for monitoring large-scale infrastructures and experimental facilities, enabling three-dimensional reconstruction of temperature fields. However, such measurements involve specific implementation constraints and may be affected by significant errors, with uncertainties influenced by factors such as calibration, environmental conditions, spatial resolution effects, and fiber positioning. Ensuring the metrological validity of Raman-based DTS measurements therefore requires a rigorous quantification of the associated measurement uncertainties. In this work, a complete uncertainty analysis of Raman-based DTS measurements is performed following the principles of the Guide to the Expression of Uncertainty in Measurement (GUM). A measurement model describing the relationship between Raman backscattered signals and temperature is established, and all relevant uncertainty sources are identified and quantified. The methodology is applied to a full-scale experimental facility equipped with a DTS interrogator and a dedicated calibration setup. Uncertainty propagation is performed using both first-order Taylor series expansion and Monte Carlo simulation, providing consistent results. The analysis shows that calibration uncertainty, spatial dispersion of the temperature field and fiber positioning within the reconstructed temperature field represent the dominant contributions to the combined uncertainty. The proposed approach provides a rigorous framework for the metrological qualification of Raman DTS systems and offers practical guidance for improving measurement reliability in distributed temperature monitoring applications.</p>
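	<p>The two propagation routes compared above can be reproduced on a toy measurement model. In the following minimal sketch the model y = f(a, b) and the input standard uncertainties are illustrative stand-ins, not the paper's DTS model; for a near-linear model the two estimates agree closely, as the GUM predicts.</p>
	<pre><code>
# GUM-style uncertainty propagation: first-order Taylor expansion
# versus Monte Carlo, on an illustrative two-input model.
import numpy as np

def f(a, b):
    return a * np.exp(b / 10.0)   # toy measurement model

a0, u_a = 50.0, 0.2    # input estimate and standard uncertainty
b0, u_b = 3.0, 0.05

# First-order Taylor: u_y^2 = (df/da)^2 u_a^2 + (df/db)^2 u_b^2
dfda = np.exp(b0 / 10.0)
dfdb = a0 * np.exp(b0 / 10.0) / 10.0
u_taylor = np.sqrt((dfda * u_a) ** 2 + (dfdb * u_b) ** 2)

# Monte Carlo: sample the inputs, push them through the model
rng = np.random.default_rng(1)
samples = f(rng.normal(a0, u_a, 200_000), rng.normal(b0, u_b, 200_000))
print(f"Taylor u(y) = {u_taylor:.4f}, Monte Carlo u(y) = {samples.std():.4f}")
</code></pre>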
	]]></content:encoded>

	<dc:title>Uncertainty Analysis and Metrological Validation of Raman Distributed Temperature Measurements in a Full-Scale Test Facility</dc:title>
			<dc:creator>Maxime Houvin</dc:creator>
			<dc:creator>Rafik Moulouel</dc:creator>
			<dc:creator>Pascal Borel</dc:creator>
			<dc:creator>Didier Boldo</dc:creator>
		<dc:identifier>doi: 10.3390/s26092830</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2830</prism:startingPage>
		<prism:doi>10.3390/s26092830</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2830</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2827">

	<title>Sensors, Vol. 26, Pages 2827: Few-Shot Fault Diagnosis of Railway Switch Machines Using Regularized Supervised Contrastive Meta-Learning</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2827</link>
	<description>Railway switch machines are key devices in railway signal systems and have a critical impact on train operation safety. However, in real operating conditions, fault samples are scarce because field data collection is cumbersome and often constrained by safety requirements, which limits the diagnostic accuracy and generalization capability of traditional fault diagnosis methods in few-shot scenarios. To address the insufficient accuracy of sensor-based railway switch machine state recognition under few-shot conditions, we propose a regularized supervised contrastive meta-learning (RSCML) fault diagnosis method for switch machines. First, the tri-axial vibration signals acquired from the throwing rod and the reducer are transformed into axis-wise STFT spectrograms and organized as a unified three-channel time-frequency representation for subsequent cross-channel feature learning. Second, channel expansion and attention enhancement are employed to obtain more informative feature representations among similar fault types under limited samples. Finally, the feature extractor is integrated into the regularized supervised contrastive ANIL framework, while multi-loss optimization and stability regularization jointly constrain the meta-learning training process. Experimental results show that the proposed method achieves a maximum accuracy of 99.73% on 3-way and 5-way few-shot tasks, together with an F1-score of up to 99.72%. In the cross-category generalization experiment, it achieves a 93.08% accuracy and a 92.84% F1-score, indicating improved robustness when the fault categories at test time differ from those used during meta-training. The proposed method shows superior classification performance and stronger generalization to unseen fault categories under the current dataset setting, which suggests promising potential for switch machine fault diagnosis under limited sample conditions.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2827: Few-Shot Fault Diagnosis of Railway Switch Machines Using Regularized Supervised Contrastive Meta-Learning</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2827">doi: 10.3390/s26092827</a></p>
	<p>Authors:
		Shanrong Li
		Qingsheng Feng
		Zhun Han
		Shuai Xiao
		Zhi Tao
		Yafei Wang
		Yiyang Zou
		Hong Li
		</p>
	<p>Railway switch machines are key devices in railway signal systems and have a critical impact on train operation safety. However, in real operating conditions, fault samples are scarce because field data collection is cumbersome and often constrained by safety requirements, which limits the diagnostic accuracy and generalization capability of traditional fault diagnosis methods in few-shot scenarios. To address the insufficient accuracy of sensor-based railway switch machine state recognition under few-shot conditions, we propose a regularized supervised contrastive meta-learning (RSCML) fault diagnosis method for switch machines. First, the tri-axial vibration signals acquired from the throwing rod and the reducer are transformed into axis-wise STFT spectrograms and organized as a unified three-channel time-frequency representation for subsequent cross-channel feature learning. Second, channel expansion and attention enhancement are employed to obtain more informative feature representations among similar fault types under limited samples. Finally, the feature extractor is integrated into the regularized supervised contrastive ANIL framework, while multi-loss optimization and stability regularization jointly constrain the meta-learning training process. Experimental results show that the proposed method achieves a maximum accuracy of 99.73% on 3-way and 5-way few-shot tasks, together with an F1-score of up to 99.72%. In the cross-category generalization experiment, it achieves a 93.08% accuracy and a 92.84% F1-score, indicating improved robustness when the fault categories at test time differ from those used during meta-training. The proposed method shows superior classification performance and stronger generalization to unseen fault categories under the current dataset setting, which suggests promising potential for switch machine fault diagnosis under limited sample conditions.</p>
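	<p>The axis-wise STFT step can be sketched as follows; the sampling rate, window length, and synthetic signals below are illustrative placeholders for the tri-axial vibration data, not values from the paper.</p>
	<pre><code>
# Build a three-channel time-frequency representation from tri-axial
# vibration signals (one magnitude spectrogram per axis, then stacked).
import numpy as np
from scipy.signal import stft

fs, T = 5000, 2.0                        # Hz, seconds (assumed)
t = np.arange(int(fs * T)) / fs
axes = [np.sin(2 * np.pi * f0 * t) + 0.1 * np.random.randn(t.size)
        for f0 in (120.0, 240.0, 360.0)]   # stand-ins for x/y/z channels

spectrograms = []
for x in axes:
    _, _, Z = stft(x, fs=fs, nperseg=256)   # complex STFT per axis
    spectrograms.append(np.abs(Z))           # magnitude spectrogram

# Leading channel axis: shape (3, freq_bins, time_frames), ready for a
# CNN that learns cross-channel features.
rep = np.stack(spectrograms, axis=0)
print(rep.shape)
</code></pre>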
	]]></content:encoded>

	<dc:title>Few-Shot Fault Diagnosis of Railway Switch Machines Using Regularized Supervised Contrastive Meta-Learning</dc:title>
			<dc:creator>Shanrong Li</dc:creator>
			<dc:creator>Qingsheng Feng</dc:creator>
			<dc:creator>Zhun Han</dc:creator>
			<dc:creator>Shuai Xiao</dc:creator>
			<dc:creator>Zhi Tao</dc:creator>
			<dc:creator>Yafei Wang</dc:creator>
			<dc:creator>Yiyang Zou</dc:creator>
			<dc:creator>Hong Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092827</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2827</prism:startingPage>
		<prism:doi>10.3390/s26092827</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2827</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2828">

	<title>Sensors, Vol. 26, Pages 2828: Progress and Challenges in Joining for Precision Endoscope Fabrication</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2828</link>
	<description>This review summarizes the base materials, joining methods, filler materials, and principal technical challenges in endoscope joining fabrication, and proposes practical strategies to improve joint reliability under clinical constraints. We conducted a comprehensive search in multiple databases, including Web of Science, Google Scholar, patent databases, Scopus, and Medline (via PubMed), for articles on joining for precision endoscope fabrication, covering the period from 1950 to 2026. We employed combinations of the keywords “endoscopy”, “minimally invasive surgery”, “welding”, “joining”, “sealing”, “soldering”, “bonding”, and “brazing”. Approximately 500 references were retrieved. After excluding duplicates and irrelevant studies, 158 publications met the inclusion criteria. Data on base materials, joining processes, filler materials, and technical issues related to sterilization, corrosion, and microstructural evolution were extracted and analyzed. Endoscopes are multi-material systems, involving metallic biomaterials (stainless steels (SSs), titanium alloys, nickel-based alloys, etc.), optical functional materials (glass, sapphire, quartz, etc.), engineering plastics, ceramics, composite materials, and coatings. Joining, sealing, and functional integration have been achieved via adhesive bonding, laser soldering, laser brazing, wave soldering, reflow soldering, fusion welding, and other joining techniques. The main challenges include how to reliably join highly mismatched dissimilar materials, how to fabricate low-residual-stress joints, and how to increase long-term resistance to sterilization-induced degradation and thermal aging over repeated 100–200 °C thermal cycles. Conventional joining techniques struggle to balance mechanical integrity, joint hermeticity, and long-term stability under such harsh cyclic conditions. The resulting joints may suffer surface yellowing, interfacial debonding, microcracking, delamination, or progressive property degradation during service. We propose three strategies to achieve reliable, low-residual-stress, and sterilization-resistant joining of dissimilar materials for endoscopes: (1) a synergistic design that combines thin-film engineering (evaporation, sputtering, and electroplating) with silver anti-oxidation layers to reduce residual stresses and enhance joint hermeticity; (2) selection principles for multi-joining processes that enable multi-material integration and functional assembly of dissimilar components; and (3) laser-based joining methods (fusion, brazing, or braze-welding) that provide precise control of heat input and bonding quality with minimal damage to heat-sensitive components.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2828: Progress and Challenges in Joining for Precision Endoscope Fabrication</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2828">doi: 10.3390/s26092828</a></p>
	<p>Authors:
		Peiquan Xu
		Xiaohao Zheng
		Leijun Li
		Ziyi Wang
		</p>
	<p>This review summarizes the base materials, joining methods, filler materials, and principal technical challenges in endoscope joining fabrication, and proposes practical strategies to improve joint reliability under clinical constraints. We conducted a comprehensive search in multiple databases, including Web of Science, Google Scholar, patent databases, Scopus, and Medline (via PubMed), for articles on joining for precision endoscope fabrication, covering the period from 1950 to 2026. We employed combinations of the keywords “endoscopy”, “minimally invasive surgery”, “welding”, “joining”, “sealing”, “soldering”, “bonding”, and “brazing”. Approximately 500 references were retrieved. After excluding duplicates and irrelevant studies, 158 publications met the inclusion criteria. Data on base materials, joining processes, filler materials, and technical issues related to sterilization, corrosion, and microstructural evolution were extracted and analyzed. Endoscopes are multi-material systems, involving metallic biomaterials (stainless steels (SSs), titanium alloys, nickel-based alloys, etc.), optical functional materials (glass, sapphire, quartz, etc.), engineering plastics, ceramics, composite materials, and coatings. Joining, sealing, and functional integration have been achieved via adhesive bonding, laser soldering, laser brazing, wave soldering, reflow soldering, fusion welding, and other joining techniques. The main challenges include how to reliably join highly mismatched dissimilar materials, how to fabricate low-residual-stress joints, and how to increase long-term resistance to sterilization-induced degradation and thermal aging over repeated 100–200 °C thermal cycles. Conventional joining techniques struggle to balance mechanical integrity, joint hermeticity, and long-term stability under such harsh cyclic conditions. The resulting joints may suffer surface yellowing, interfacial debonding, microcracking, delamination, or progressive property degradation during service. We propose three strategies to achieve reliable, low-residual-stress, and sterilization-resistant joining of dissimilar materials for endoscopes: (1) a synergistic design that combines thin-film engineering (evaporation, sputtering, and electroplating) with silver anti-oxidation layers to reduce residual stresses and enhance joint hermeticity; (2) selection principles for multi-joining processes that enable multi-material integration and functional assembly of dissimilar components; and (3) laser-based joining methods (fusion, brazing, or braze-welding) that provide precise control of heat input and bonding quality with minimal damage to heat-sensitive components.</p>
	]]></content:encoded>

	<dc:title>Progress and Challenges in Joining for Precision Endoscope Fabrication</dc:title>
			<dc:creator>Peiquan Xu</dc:creator>
			<dc:creator>Xiaohao Zheng</dc:creator>
			<dc:creator>Leijun Li</dc:creator>
			<dc:creator>Ziyi Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092828</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2828</prism:startingPage>
		<prism:doi>10.3390/s26092828</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2828</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2826">

	<title>Sensors, Vol. 26, Pages 2826: Dynamic Graph Neural Network for Vehicle Trajectory Prediction and Driving Intent Recognition</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2826</link>
	<description>To address the limitations of existing vehicle trajectory prediction methods, including insufficient modeling of dynamic inter-vehicle interactions, weak temporal continuity of complex driving intentions such as lane-changing, and high uncertainty in future trajectory prediction, this paper proposes a vehicle trajectory prediction method that integrates Dynamic Graph Neural Networks (DyGNN) with a Transformer. Specifically, a time-varying interaction graph is constructed to model the dynamically evolving topological interaction relationships among vehicles, while a Transformer encoder is employed to extract temporal dependency features from historical trajectory sequences. In this way, the joint representation of spatial interaction information and temporal evolution information is achieved, thereby improving the accuracy and continuity of driving intention recognition in complex traffic scenarios. On this basis, driving intention is further introduced into the trajectory prediction process as a prior constraint, which effectively reduces the uncertainty of future trajectory prediction. Comparative experiments on real-world traffic datasets demonstrate that the proposed method maintains low prediction errors across different prediction horizons, showing good effectiveness and robustness.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2826: Dynamic Graph Neural Network for Vehicle Trajectory Prediction and Driving Intent Recognition</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2826">doi: 10.3390/s26092826</a></p>
	<p>Authors:
		Shaobo Wu
		Yuxuan Wang
		Yi Gong
		</p>
	<p>To address the limitations of existing vehicle trajectory prediction methods, including insufficient modeling of dynamic inter-vehicle interactions, weak temporal continuity of complex driving intentions such as lane-changing, and high uncertainty in future trajectory prediction, this paper proposes a vehicle trajectory prediction method that integrates Dynamic Graph Neural Networks (DyGNN) with a Transformer. Specifically, a time-varying interaction graph is constructed to model the dynamically evolving topological interaction relationships among vehicles, while a Transformer encoder is employed to extract temporal dependency features from historical trajectory sequences. In this way, the joint representation of spatial interaction information and temporal evolution information is achieved, thereby improving the accuracy and continuity of driving intention recognition in complex traffic scenarios. On this basis, driving intention is further introduced into the trajectory prediction process as a prior constraint, which effectively reduces the uncertainty of future trajectory prediction. Comparative experiments on real-world traffic datasets demonstrate that the proposed method maintains low prediction errors across different prediction horizons, showing good effectiveness and robustness.</p>
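	<p>A time-varying interaction graph of the kind described above can be built by thresholding pairwise distances at each timestep. In this minimal sketch the positions and the 30 m radius are illustrative, not values from the paper.</p>
	<pre><code>
# One boolean adjacency matrix per timestep: vehicles within a distance
# threshold are connected, yielding a dynamically evolving graph.
import numpy as np

def interaction_graphs(positions, radius=30.0):
    """positions: array (T, V, 2) of xy coordinates for V vehicles over
    T timesteps. Returns a (T, V, V) boolean adjacency tensor."""
    T, V, _ = positions.shape
    adj = np.zeros((T, V, V), dtype=bool)
    for k in range(T):
        d = np.linalg.norm(positions[k, :, None] - positions[k, None, :],
                           axis=-1)
        adj[k] = (d > 0) * (radius >= d)   # within radius, no self-loops
    return adj

pos = np.cumsum(np.random.randn(50, 4, 2), axis=0)   # 4 random walkers
print(interaction_graphs(pos).sum(axis=(1, 2)))       # edges per timestep
</code></pre>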
	]]></content:encoded>

	<dc:title>Dynamic Graph Neural Network for Vehicle Trajectory Prediction and Driving Intent Recognition</dc:title>
			<dc:creator>Shaobo Wu</dc:creator>
			<dc:creator>Yuxuan Wang</dc:creator>
			<dc:creator>Yi Gong</dc:creator>
		<dc:identifier>doi: 10.3390/s26092826</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2826</prism:startingPage>
		<prism:doi>10.3390/s26092826</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2826</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2825">

	<title>Sensors, Vol. 26, Pages 2825: Fuzzy Vaults in Biometric Cryptosystems: A Survey of Techniques, Performance, and Applications</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2825</link>
	<description>Biometric sensing systems enable accurate identity recognition using unique physiological traits. These systems can be unimodal (single trait) or multimodal (multiple traits, such as iris and fingerprint). Biometric templates, digital representations of these traits, enhance security over traditional methods but are vulnerable to attacks. Unlike passwords, compromised templates cannot be replaced, necessitating robust protection. Various security schemes exist, including cancellable biometrics, biometric cryptosystems, sensing technology, and biometrics in the encrypted domain. Cancellable biometrics apply transformations, such as biometric salting, to obscure the original data. Biometric cryptosystems integrate cryptographic techniques, including key generation and key binding, to enhance security. Biometrics in the encrypted domain, such as homomorphic encryption, ensures data remains encrypted during storage and computation. This survey focuses on the fuzzy vault method, a key-binding biometric cryptosystem. It analyses its applications, security performance, and associated challenges across different domains. By analysing advancements in fuzzy vault mechanisms, this paper provides insights into enhancing sensor-based biometric security. The study aims to serve as a reference for researchers exploring secure and efficient biometric authentication methods, ensuring robust protection against unauthorised access while maintaining the integrity and usability of biometric data in real-world applications.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2825: Fuzzy Vaults in Biometric Cryptosystems: A Survey of Techniques, Performance, and Applications</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2825">doi: 10.3390/s26092825</a></p>
	<p>Authors:
		Faria Farheen
		Woo Yeol Yang
		Sparsh Sharma
		Saurabh Singh
		</p>
	<p>Biometric sensing systems enable accurate identity recognition using unique physiological traits. These systems can be unimodal (single trait) or multimodal (multiple traits, such as iris and fingerprint). Biometric templates, digital representations of these traits, enhance security over traditional methods but are vulnerable to attacks. Unlike passwords, compromised templates cannot be replaced, necessitating robust protection. Various security schemes exist, including cancellable biometrics, biometric cryptosystems, sensing technology, and biometrics in the encrypted domain. Cancellable biometrics apply transformations, such as biometric salting, to obscure the original data. Biometric cryptosystems integrate cryptographic techniques, including key generation and key binding, to enhance security. Biometrics in the encrypted domain, such as homomorphic encryption, ensures data remains encrypted during storage and computation. This survey focuses on the fuzzy vault method, a key-binding biometric cryptosystem. It analyses its applications, security performance, and associated challenges across different domains. By analysing advancements in fuzzy vault mechanisms, this paper provides insights into enhancing sensor-based biometric security. The study aims to serve as a reference for researchers exploring secure and efficient biometric authentication methods, ensuring robust protection against unauthorised access while maintaining the integrity and usability of biometric data in real-world applications.</p>
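	<p>The key-binding idea behind the fuzzy vault can be sketched at toy scale: the secret becomes polynomial coefficients, genuine feature points are evaluated on the polynomial, and chaff points conceal them. The field size and point counts below are illustrative; practical vaults rely on error-correcting decoding and far larger parameters.</p>
	<pre><code>
# Toy fuzzy vault encoding (locking side only).
import random

P = 10007                       # small prime field (illustrative)
secret = [1234, 56, 7]          # key bound as polynomial coefficients

def poly(x):
    return (secret[0] + secret[1] * x + secret[2] * x * x) % P

genuine = [101, 202, 303, 404]           # stand-in minutiae features
vault = [(x, poly(x)) for x in genuine]  # genuine points lie on the curve

# Add chaff points that do NOT lie on the polynomial
while len(vault) != 40:
    x, y = random.randrange(P), random.randrange(P)
    if x not in [v[0] for v in vault] and y != poly(x):
        vault.append((x, y))
random.shuffle(vault)
print(len(vault), "vault points; only 4 of them reveal the key")
</code></pre>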
	]]></content:encoded>

	<dc:title>Fuzzy Vaults in Biometric Cryptosystems: A Survey of Techniques, Performance, and Applications</dc:title>
			<dc:creator>Faria Farheen</dc:creator>
			<dc:creator>Woo Yeol Yang</dc:creator>
			<dc:creator>Sparsh Sharma</dc:creator>
			<dc:creator>Saurabh Singh</dc:creator>
		<dc:identifier>doi: 10.3390/s26092825</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2825</prism:startingPage>
		<prism:doi>10.3390/s26092825</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2825</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2824">

	<title>Sensors, Vol. 26, Pages 2824: Domain-Adversarial Neural Network for UWB NLOS Identification in Multiple Environments</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2824</link>
	<description>Accurate recognition of Line-of-Sight (LOS) and Non-Line-of-Sight (NLOS) signals is crucial for mitigating positioning errors and improving the positioning performance of Ultra-Wideband (UWB) localization systems. Current NLOS identification methods are tied to specific measurement environments and lack effective cross-domain adaptability, failing to generalize to unseen environments. To address these challenges, we propose a novel NLOS identification strategy based on a Domain-Adversarial Neural Network (DANN). Firstly, since traditional feature extraction methods fail to capture the deep nonlinear characteristics of Channel Impulse Response (CIR) data, we develop a CNN-DAE-MLP-Attention (CDM) hybrid model for high-quality channel feature extraction that takes both raw CIR data and handcrafted channel features into account. Secondly, we integrate the CDM model into the DANN framework by replacing its original shallow feature extraction module, yielding the proposed CDMD algorithm; by combining the robust feature representation capability of CDM with the strong domain adaptation capability of DANN, CDMD achieves enhanced performance in cross-domain LOS/NLOS identification. Finally, the effectiveness of the proposed algorithm is verified using measured data from different scenarios. Results demonstrate that the proposed algorithm possesses strong generalization ability. For cross-domain NLOS recognition from underground parking garage to corridor and from underground parking garage to lobby, the proposed method achieves accuracies of 77.00% and 72.84%, respectively. Moreover, the results indicate that only a limited number of target-domain samples are sufficient for the model to achieve accurate cross-domain transfer.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2824: Domain-Adversarial Neural Network for UWB NLOS Identification in Multiple Environments</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2824">doi: 10.3390/s26092824</a></p>
	<p>Authors:
		Suying Jiang
		Jiachun Li
		Yadong Xu
		Yuyang Rong
		</p>
	<p>Accurate recognition of Line-of-Sight (LOS) and Non-Line-of-Sight (NLOS) signals is crucial for mitigating positioning errors and improving the positioning performance of Ultra-Wideband (UWB) localization systems. Current NLOS identification methods are tied to specific measurement environments and lack effective cross-domain adaptability, failing to generalize to unseen environments. To address these challenges, we propose a novel NLOS identification strategy based on a Domain-Adversarial Neural Network (DANN). Firstly, since traditional feature extraction methods fail to capture the deep nonlinear characteristics of Channel Impulse Response (CIR) data, we develop a CNN-DAE-MLP-Attention (CDM) hybrid model for high-quality channel feature extraction that takes both raw CIR data and handcrafted channel features into account. Secondly, we integrate the CDM model into the DANN framework by replacing its original shallow feature extraction module, yielding the proposed CDMD algorithm; by combining the robust feature representation capability of CDM with the strong domain adaptation capability of DANN, CDMD achieves enhanced performance in cross-domain LOS/NLOS identification. Finally, the effectiveness of the proposed algorithm is verified using measured data from different scenarios. Results demonstrate that the proposed algorithm possesses strong generalization ability. For cross-domain NLOS recognition from underground parking garage to corridor and from underground parking garage to lobby, the proposed method achieves accuracies of 77.00% and 72.84%, respectively. Moreover, the results indicate that only a limited number of target-domain samples are sufficient for the model to achieve accurate cross-domain transfer.</p>
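	<p>The adversarial coupling in a DANN rests on a gradient reversal layer: identity in the forward pass, sign-flipped gradients in the backward pass. The following minimal PyTorch sketch shows that mechanism in isolation, independent of the CDM feature extractor proposed here; the tensor shapes and lambda value are illustrative.</p>
	<pre><code>
# Gradient reversal layer, the core trick of DANN-style training.
import torch

class GradReverse(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, lambda_):
        ctx.lambda_ = lambda_
        return x.clone()             # identity in the forward pass

    @staticmethod
    def backward(ctx, grad_output):
        # Reversed (and scaled) gradient in the backward pass, so the
        # feature extractor is pushed toward domain-invariant features.
        return grad_output.neg() * ctx.lambda_, None

features = torch.randn(8, 16, requires_grad=True)
domain_logits = GradReverse.apply(features, 1.0).sum()
domain_logits.backward()
print(features.grad[0, :4])          # gradients arrive sign-flipped
</code></pre>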
	]]></content:encoded>

	<dc:title>Domain-Adversarial Neural Network for UWB NLOS Identification in Multiple Environments</dc:title>
			<dc:creator>Suying Jiang</dc:creator>
			<dc:creator>Jiachun Li</dc:creator>
			<dc:creator>Yadong Xu</dc:creator>
			<dc:creator>Yuyang Rong</dc:creator>
		<dc:identifier>doi: 10.3390/s26092824</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2824</prism:startingPage>
		<prism:doi>10.3390/s26092824</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2824</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2823">

	<title>Sensors, Vol. 26, Pages 2823: DeDiAttack: Enhancing Transferability of Unrestricted Adversarial Examples via Deformation-Constrained Diffusion</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2823</link>
	<description>DNNs are highly vulnerable to adversarial examples (AEs). To achieve high transferability, traditional AEs often introduce unnatural artifacts that are easily perceptible to the human eye. Unrestricted attacks have emerged as a promising paradigm to generate more natural unrestricted adversarial examples (UAEs). However, existing UAEs struggle to balance visual fidelity and black-box transferability. Color-based attacks produce noticeable unnatural visual mutations, and diffusion-based attacks transfer poorly to unknown black-box models. We observe that directly injecting unconstrained random perturbations into the diffusion latent space destroys the normal distribution of data, thereby causing a distribution shift. Distribution shifts degrade adversarial perturbations into invalid noise and cause surrogate model overfitting. Furthermore, introducing elastic deformation during the denoising process forces surrogate models to focus on highly transferable features. Motivated by these observations, we propose an unrestricted attack based on deformation-constrained diffusion, called DeDiAttack. Our method utilizes the manifold prior knowledge of diffusion models to translate elastic deformations into smooth fluid changes. The mechanism effectively eliminates unnatural artifacts and generates highly natural and transferable UAEs. Extensive black-box experiments demonstrate that DeDiAttack outperforms existing attacks and improves the black-box transferability of generated UAEs by 7.2% on the ViT-B surrogate model. The proposed method also provides a useful robustness evaluation tool for vision-based sensing and imaging systems.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2823: DeDiAttack: Enhancing Transferability of Unrestricted Adversarial Examples via Deformation-Constrained Diffusion</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2823">doi: 10.3390/s26092823</a></p>
	<p>Authors:
		Bin Qu
		Anjie Peng
		Shijie Zhao
		</p>
	<p>DNNs are highly vulnerable to adversarial examples (AEs). To achieve high transferability, traditional AEs often introduce unnatural artifacts that are easily perceptible to the human eye. Unrestricted attacks have emerged as a promising paradigm to generate more natural unrestricted adversarial examples (UAEs). However, existing UAEs struggle to balance visual fidelity and black-box transferability. Color-based attacks produce noticeable unnatural visual mutations, and diffusion-based attacks transfer poorly to unknown black-box models. We observe that directly injecting unconstrained random perturbations into the diffusion latent space destroys the normal distribution of data, thereby causing a distribution shift. Distribution shifts degrade adversarial perturbations into invalid noise and cause surrogate model overfitting. Furthermore, introducing elastic deformation during the denoising process forces surrogate models to focus on highly transferable features. Motivated by these observations, we propose an unrestricted attack based on deformation-constrained diffusion, called DeDiAttack. Our method utilizes the manifold prior knowledge of diffusion models to translate elastic deformations into smooth fluid changes. The mechanism effectively eliminates unnatural artifacts and generates highly natural and transferable UAEs. Extensive black-box experiments demonstrate that DeDiAttack outperforms existing attacks and improves the black-box transferability of generated UAEs by 7.2% on the ViT-B surrogate model. The proposed method also provides a useful robustness evaluation tool for vision-based sensing and imaging systems.</p>
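	<p>Elastic deformation itself can be sketched independently of the diffusion model as a smoothed random displacement field applied to an image; the smoothness (sigma) and magnitude (alpha) parameters below are illustrative, not the paper's settings.</p>
	<pre><code>
# Elastic deformation via a Gaussian-smoothed random displacement field.
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_deform(img, alpha=8.0, sigma=4.0, seed=0):
    rng = np.random.default_rng(seed)
    h, w = img.shape
    # Smoothed random displacements give fluid-like warping
    dx = gaussian_filter(rng.uniform(-1, 1, (h, w)), sigma) * alpha
    dy = gaussian_filter(rng.uniform(-1, 1, (h, w)), sigma) * alpha
    yy, xx = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
    coords = np.array([yy + dy, xx + dx])
    return map_coordinates(img, coords, order=1, mode="reflect")

img = np.zeros((64, 64)); img[24:40, 24:40] = 1.0   # toy square
print(elastic_deform(img).sum())                     # mass roughly kept
</code></pre>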
	]]></content:encoded>

	<dc:title>DeDiAttack: Enhancing Transferability of Unrestricted Adversarial Examples via Deformation-Constrained Diffusion</dc:title>
			<dc:creator>Bin Qu</dc:creator>
			<dc:creator>Anjie Peng</dc:creator>
			<dc:creator>Shijie Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/s26092823</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2823</prism:startingPage>
		<prism:doi>10.3390/s26092823</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2823</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2822">

	<title>Sensors, Vol. 26, Pages 2822: Multi-Chaotic HEOA for Hardware-Aware Neural Architecture Search: Brain Tumor Classification on FPGA</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2822</link>
	<description>Automated brain tumor classification from MRI scans requires optimized CNN architectures deployable on embedded FPGA platforms. This paper presents an integrated approach combining the Multi-Chaotic Enhanced HEOA (MC-HEOA) for automatic CNN architecture discovery with deployment validation on a Xilinx Zynq-7000 FPGA. A CEC2023 benchmark across 10 test functions evaluates 6 chaotic maps and selects the Tent map as the optimal diversity generator. The NAS search space spans 1.31 × 10¹⁶ configurations encoding architectural choices (layers, convolutions, channels, pooling) under a strict constraint of fewer than one million parameters for FPGA compatibility. The optimal discovered architecture, trained and evaluated using single-channel grayscale input (224 × 224 × 1), the natural representation for intrinsically monochromatic MRI data, achieves 91.33% test accuracy and 92.44% validation accuracy with 724,200 parameters on the 4-class Brain Tumor MRI dataset (glioma, meningioma, pituitary, no tumor). HLS synthesis on the Zynq-7000 (xc7z020clg484-1) validates embedded deployment feasibility, with DSP utilization of 16%, LUT utilization of 57%, FF utilization of 28%, and an inference latency of 374 ms at 100 MHz. This study demonstrates the effectiveness of MC-HEOA for discovering compact, high-performing CNN architectures compatible with FPGA deployment, opening new perspectives for real-time embedded medical diagnosis.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2822: Multi-Chaotic HEOA for Hardware-Aware Neural Architecture Search: Brain Tumor Classification on FPGA</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2822">doi: 10.3390/s26092822</a></p>
	<p>Authors:
		Ismail Mchichou
		Hamza Tahiri
		Mohamed Amine Tahiri
		Hicham Amakdouf
		</p>
	<p>Automated brain tumor classification from MRI scans requires optimized CNN architectures deployable on embedded FPGA platforms. This paper presents an integrated approach combining the Multi-Chaotic Enhanced HEOA (MC-HEOA) for automatic CNN architecture discovery with deployment validation on a Xilinx Zynq-7000 FPGA. A CEC2023 benchmark across 10 test functions evaluates 6 chaotic maps and selects the Tent map as the optimal diversity generator. The NAS search space spans 1.31 × 10¹⁶ configurations encoding architectural choices (layers, convolutions, channels, pooling) under a strict constraint of fewer than one million parameters for FPGA compatibility. The optimal discovered architecture, trained and evaluated using single-channel grayscale input (224 × 224 × 1), the natural representation for intrinsically monochromatic MRI data, achieves 91.33% test accuracy and 92.44% validation accuracy with 724,200 parameters on the 4-class Brain Tumor MRI dataset (glioma, meningioma, pituitary, no tumor). HLS synthesis on the Zynq-7000 (xc7z020clg484-1) validates embedded deployment feasibility, with DSP utilization of 16%, LUT utilization of 57%, FF utilization of 28%, and an inference latency of 374 ms at 100 MHz. This study demonstrates the effectiveness of MC-HEOA for discovering compact, high-performing CNN architectures compatible with FPGA deployment, opening new perspectives for real-time embedded medical diagnosis.</p>
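	<p>The Tent map selected as the diversity generator is a one-line chaotic recurrence; a minimal sketch follows, with an illustrative initial value and mu close to 2, where the map is chaotic.</p>
	<pre><code>
# Tent map sequence in [0, 1], usable to seed or perturb a
# population-based optimizer (initial value and mu are illustrative).
def tent_sequence(x0=0.37, mu=1.99, n=10):
    xs, x = [], x0
    for _ in range(n):
        x = mu * min(x, 1.0 - x)    # tent map update
        xs.append(x)
    return xs

print(tent_sequence())
</code></pre>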
	]]></content:encoded>

	<dc:title>Multi-Chaotic HEOA for Hardware-Aware Neural Architecture Search: Brain Tumor Classification on FPGA</dc:title>
			<dc:creator>Ismail Mchichou</dc:creator>
			<dc:creator>Hamza Tahiri</dc:creator>
			<dc:creator>Mohamed Amine Tahiri</dc:creator>
			<dc:creator>Hicham Amakdouf</dc:creator>
		<dc:identifier>doi: 10.3390/s26092822</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2822</prism:startingPage>
		<prism:doi>10.3390/s26092822</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2822</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2821">

	<title>Sensors, Vol. 26, Pages 2821: Extended Field of View and Resolution Enhancement in Lensless Digital Holography</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2821</link>
	<description>Lensless digital holography provides a simple, low-cost imaging platform with a large field of view (FOV) and quantitative phase capability, making it attractive for biomedical imaging, microstructure inspection, and large-area imaging. However, the achievable FOV is still limited by sensor size, and in-line reconstruction suffers from twin-image artifacts that degrade image quality. To overcome these limitations, this study proposes an extended-FOV lensless digital holography method that combines hologram stitching with multi-depth phase retrieval. Multiple holograms acquired from laterally shifted FOVs are stitched to form an extended hologram, while holograms recorded at multiple axial depths are used to suppress twin-image artifacts and improve reconstruction fidelity. Experimental results show that the proposed method effectively expands the imaging area, enhances effective resolution by integrating complementary diffraction information from different FOVs, and improves image contrast and feature visibility. This approach enables extended FOV, resolution enhancement, and high-quality holographic imaging while preserving the simple lensless digital holography architecture.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2821: Extended Field of View and Resolution Enhancement in Lensless Digital Holography</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2821">doi: 10.3390/s26092821</a></p>
	<p>Authors:
		Chung-Hsuan Huang
		Chih-Cheng Hsu
		Huai-Che Chu
		Chau-Jern Cheng
		Han-Yen Tu
		</p>
	<p>Lensless digital holography provides a simple, low-cost imaging platform with a large field of view (FOV) and quantitative phase capability, making it attractive for biomedical imaging, microstructure inspection, and large-area imaging. However, the achievable FOV is still limited by sensor size, and in-line reconstruction suffers from twin-image artifacts that degrade image quality. To overcome these limitations, this study proposes an extended-FOV lensless digital holography method that combines hologram stitching with multi-depth phase retrieval. Multiple holograms acquired from laterally shifted FOVs are stitched to form an extended hologram, while holograms recorded at multiple axial depths are used to suppress twin-image artifacts and improve reconstruction fidelity. Experimental results show that the proposed method effectively expands the imaging area, enhances effective resolution by integrating complementary diffraction information from different FOVs, and improves image contrast and feature visibility. This approach enables extended FOV, resolution enhancement, and high-quality holographic imaging while preserving the simple lensless digital holography architecture.</p>
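	<p>Numerical propagation of a recorded hologram to a chosen depth is commonly done with the angular spectrum method; the minimal sketch below uses illustrative wavelength, pixel pitch, and distance values and does not reproduce the paper's full stitching and multi-depth phase retrieval pipeline.</p>
	<pre><code>
# Angular spectrum propagation of a complex field by distance z.
import numpy as np

def angular_spectrum(field, wavelength, pitch, z):
    n, m = field.shape
    fx = np.fft.fftfreq(m, d=pitch)
    fy = np.fft.fftfreq(n, d=pitch)
    FX, FY = np.meshgrid(fx, fy)
    arg = 1.0 - (wavelength * FX) ** 2 - (wavelength * FY) ** 2
    kz = 2 * np.pi / wavelength * np.sqrt(np.maximum(arg, 0.0))
    H = np.exp(1j * kz * z) * (arg > 0)   # drop evanescent components
    return np.fft.ifft2(np.fft.fft2(field) * H)

holo = np.ones((256, 256), dtype=complex)             # stand-in hologram
rec = angular_spectrum(holo, 532e-9, 3.45e-6, -0.02)  # back-propagate 2 cm
print(np.abs(rec).mean())
</code></pre>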
	]]></content:encoded>

	<dc:title>Extended Field of View and Resolution Enhancement in Lensless Digital Holography</dc:title>
			<dc:creator>Chung-Hsuan Huang</dc:creator>
			<dc:creator>Chih-Cheng Hsu</dc:creator>
			<dc:creator>Huai-Che Chu</dc:creator>
			<dc:creator>Chau-Jern Cheng</dc:creator>
			<dc:creator>Han-Yen Tu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092821</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2821</prism:startingPage>
		<prism:doi>10.3390/s26092821</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2821</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2820">

	<title>Sensors, Vol. 26, Pages 2820: Multi-Source Aero-Engine Fault Diagnosis Using Explainable Boosted Tree with Spatiotemporal Attention and Adaptive Feature Selection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2820</link>
	<description>Faults in aero-engine rotating components account for more than 60% of total failures, and their early features are easily masked by noise under complex conditions. Traditional single-sensor diagnosis suffers from low feature utilization, poor interpretability, and weak cross-condition generalization. This paper proposes a multi-source fault diagnosis method for aero-engines based on an explainable boosted tree, integrating spatiotemporal attention (STA) and adaptive feature selection (AFS). We collect multi-domain data from four standard core sensors widely used in existing engine health management systems and extract multi-dimensional features to build a heterogeneous feature set. Adaptive feature selection is implemented using mutual information and a variance inflation factor. A spatiotemporal attention mechanism is introduced to weight and fuse features effectively. The fused features are used to train an XGBoost classifier, and SHAP values are adopted to quantify feature contributions and improve model interpretability. Uncertainty sources and sensitivity boundaries are quantitatively analyzed to support engineering acceptance. The method achieves high sensitivity to early weak faults and stable uncertainty under complex operating conditions. Tests on a fault simulation test rig show that the proposed method achieves 99.2% diagnosis accuracy and 97.5% cross-condition generalization accuracy, outperforming conventional models. It can identify early weak fault signatures, clarify key fault indicators, and provide a quantitative basis for fault tracing and maintenance decision-making. The method employs a standard sensor suite without additional hardware costs, features lightweight computation and low inference overhead, and delivers clear economic benefits by reducing false alarms, avoiding unplanned downtime, and optimizing maintenance resources. It offers a reliable, cost-effective solution for aero-engine fault diagnosis under complex operating conditions.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2820: Multi-Source Aero-Engine Fault Diagnosis Using Explainable Boosted Tree with Spatiotemporal Attention and Adaptive Feature Selection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2820">doi: 10.3390/s26092820</a></p>
	<p>Authors:
		Ting Zhou
		Hua-Chun Xiang
		Feng Zhang
		Mao-Bin Lv
		Jie Shen
		</p>
	<p>Faults in aero-engine rotating components account for more than 60% of total failures, and their early features are easily masked by noise under complex conditions. Traditional single-sensor diagnosis suffers from low feature utilization, poor interpretability, and weak cross-condition generalization. This paper proposes a multi-source fault diagnosis method for aero-engines based on an explainable boosted tree, integrating spatiotemporal attention (STA) and adaptive feature selection (AFS). We collect multi-domain data from four standard core sensors widely used in existing engine health management systems and extract multi-dimensional features to build a heterogeneous feature set. Adaptive feature selection is implemented using mutual information and a variance inflation factor. A spatiotemporal attention mechanism is introduced to weight and fuse features effectively. The fused features are used to train an XGBoost classifier, and SHAP values are adopted to quantify feature contributions and improve model interpretability. Uncertainty sources and sensitivity boundaries are quantitatively analyzed to support engineering acceptance. The method achieves high sensitivity to early weak faults and stable uncertainty under complex operating conditions. Tests on a fault simulation test rig show that the proposed method achieves 99.2% diagnosis accuracy and 97.5% cross-condition generalization accuracy, outperforming conventional models. It can identify early weak fault signatures, clarify key fault indicators, and provide a quantitative basis for fault tracing and maintenance decision-making. The method employs a standard sensor suite without additional hardware costs, features lightweight computation and low inference overhead, and delivers clear economic benefits by reducing false alarms, avoiding unplanned downtime, and optimizing maintenance resources. It offers a reliable, cost-effective solution for aero-engine fault diagnosis under complex operating conditions.</p>
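	<p>The adaptive feature selection step, mutual information for relevance followed by variance-inflation-factor pruning for redundancy, can be sketched on synthetic data; the thresholds below are illustrative, not the paper's.</p>
	<pre><code>
# Relevance filter (mutual information) plus redundancy filter (VIF).
import numpy as np
from sklearn.feature_selection import mutual_info_classif

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 6))
X[:, 5] = X[:, 0] + 0.01 * rng.standard_normal(500)   # near-duplicate
y = (X[:, 0] + X[:, 1] > 0).astype(int)

# Step 1: keep features with nontrivial mutual information with the label
mi = mutual_info_classif(X, y, random_state=0)
keep = [j for j in range(X.shape[1]) if mi[j] > 0.02]

# Step 2: iteratively drop the highest-VIF feature until all are accepted
# (VIF_j is the j-th diagonal of the inverse correlation matrix)
def prune_by_vif(X, cols, limit=10.0):
    cols = list(cols)
    while len(cols) > 1:
        vif = np.diag(np.linalg.inv(np.corrcoef(X[:, cols], rowvar=False)))
        worst = int(np.argmax(vif))
        if limit > vif[worst]:
            break
        cols.pop(worst)
    return cols

print("selected features:", prune_by_vif(X, keep))
</code></pre>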
	]]></content:encoded>

	<dc:title>Multi-Source Aero-Engine Fault Diagnosis Using Explainable Boosted Tree with Spatiotemporal Attention and Adaptive Feature Selection</dc:title>
			<dc:creator>Ting Zhou</dc:creator>
			<dc:creator>Hua-Chun Xiang</dc:creator>
			<dc:creator>Feng Zhang</dc:creator>
			<dc:creator>Mao-Bin Lv</dc:creator>
			<dc:creator>Jie Shen</dc:creator>
		<dc:identifier>doi: 10.3390/s26092820</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2820</prism:startingPage>
		<prism:doi>10.3390/s26092820</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2820</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2819">

	<title>Sensors, Vol. 26, Pages 2819: Electrochemical Stripping Analysis at Paper-Based (Bio)Sensors: Current State-of-the-Art and Prospects</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2819</link>
	<description>Paper-based devices (PADs) have gained increasing attention over the last few years as portable, low-cost and disposable (bio)sensors for point-of-care and on-site analysis. Electrochemistry is a particularly attractive detection mode in PAD assays thanks to its sensitivity and compatibility with portable instrumentation. In particular, electrochemical stripping analysis (ESA) is one of the most sensitive electroanalytical techniques and is therefore suitable for the trace assays required in environmental monitoring, clinical diagnostics and food control. Coupling paper as a functional platform with the exceptional sensitivity of ESA creates a powerful analytical tool for trace metal analysis and (bio)sensing. This perspective briefly outlines the current state of the art in the field of paper-based (bio)sensors using ESA. It describes the principle of ESA, illustrates different strategies for on-paper electrode fabrication and modification and demonstrates representative applications to trace metal analysis and biosensing. Finally, limitations are identified and future prospects are discussed.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2819: Electrochemical Stripping Analysis at Paper-Based (Bio)Sensors: Current State-of-the-Art and Prospects</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2819">doi: 10.3390/s26092819</a></p>
	<p>Authors:
		Christos Kokkinos
		Anastasios Economou
		</p>
	<p>Paper-based devices (PADs) have gained increasing attention over the last few years as portable, low-cost and disposable (bio)sensors for point-of-care and on-site analysis. Electrochemistry is a particularly attractive detection mode in PAD assays thanks to its sensitivity and compatibility with portable instrumentation. In particular, electrochemical stripping analysis (ESA) is one of the most sensitive electroanalytical techniques and is therefore suitable for the trace assays required in environmental monitoring, clinical diagnostics and food control. Coupling paper as a functional platform with the exceptional sensitivity of ESA creates a powerful analytical tool for trace metal analysis and (bio)sensing. This perspective briefly outlines the current state of the art in the field of paper-based (bio)sensors using ESA. It describes the principle of ESA, illustrates different strategies for on-paper electrode fabrication and modification and demonstrates representative applications to trace metal analysis and biosensing. Finally, limitations are identified and future prospects are discussed.</p>
	]]></content:encoded>

	<dc:title>Electrochemical Stripping Analysis at Paper-Based (Bio)Sensors: Current State-of-the-Art and Prospects</dc:title>
			<dc:creator>Christos Kokkinos</dc:creator>
			<dc:creator>Anastasios Economou</dc:creator>
		<dc:identifier>doi: 10.3390/s26092819</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Perspective</prism:section>
	<prism:startingPage>2819</prism:startingPage>
		<prism:doi>10.3390/s26092819</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2819</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2818">

	<title>Sensors, Vol. 26, Pages 2818: Kinematic Characteristics of the Racket in Table Tennis During Backhand Flick Followed by Backhand Fast Block and Forehand Fast Block Combinations</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2818</link>
	<description>A backhand flick is frequently used in table tennis to initiate offensive play, yet how racket motion evolves during subsequent stroke transitions remains insufficiently characterized. This study examined racket kinematics in two common follow-up combinations: a backhand flick followed by a backhand fast block (BFBB) and a backhand flick followed by a forehand fast block (BFFB). In a within-subject design, ten national-level male players performed both combinations, and racket motion was recorded using a three-dimensional motion capture system at 200 Hz. Racket velocity, phase duration, and spatial displacement were quantified across the stroke sequence, and within-player differences between the two stroke transition combinations following the backhand flick were examined. Compared with BFBB, BFFB showed higher racket velocity at most key moments, particularly near ball contact, whereas no significant difference was found at the end of the follow-through. Backward-phase duration did not differ between the two conditions, but BFFB showed longer durations during the hitting and follow-through phases, together with a longer overall duration. BFFB also exhibited greater directional displacement across multiple phases, whereas BFBB was characterized by a more compact spatiotemporal pattern. These findings provide biomechanical evidence that different follow-up strokes after an identical backhand flick are associated with distinct patterns of racket motion during stroke transitions and may offer a kinematic reference for sequence-specific training in table tennis.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2818: Kinematic Characteristics of the Racket in Table Tennis During Backhand Flick Followed by Backhand Fast Block and Forehand Fast Block Combinations</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2818">doi: 10.3390/s26092818</a></p>
	<p>Authors:
		Jianfeng Niu
		Chingleong Gan
		Xinyu May Teo
		Yaqi Xue
		Xiaojie Guo
		Wenlong Ma
		Haobai Li
		Zhikun Gao
		Zhiping Zeng
		</p>
	<p>A backhand flick is frequently used in table tennis to initiate offensive play, yet how racket motion evolves during subsequent stroke transitions remains insufficiently characterized. This study examined racket kinematics in two common follow-up combinations: a backhand flick followed by a backhand fast block (BFBB) and a backhand flick followed by a forehand fast block (BFFB). In a within-subject design, ten national-level male players performed both combinations, and racket motion was recorded using a three-dimensional motion capture system at 200 Hz. Racket velocity, phase duration, and spatial displacement were quantified across the stroke sequence, and within-player differences between the two stroke transition combinations following the backhand flick were examined. Compared with BFBB, BFFB showed higher racket velocity at most key moments, particularly near ball contact, whereas no significant difference was found at the end of the follow-through. Backward-phase duration did not differ between the two conditions, but BFFB showed longer durations during the hitting and follow-through phases, together with a longer overall duration. BFFB also exhibited greater directional displacement across multiple phases, whereas BFBB was characterized by a more compact spatiotemporal pattern. These findings provide biomechanical evidence that different follow-up strokes after an identical backhand flick are associated with distinct patterns of racket motion during stroke transitions and may offer a kinematic reference for sequence-specific training in table tennis.</p>
	]]></content:encoded>

	<dc:title>Kinematic Characteristics of the Racket in Table Tennis During Backhand Flick Followed by Backhand Fast Block and Forehand Fast Block Combinations</dc:title>
			<dc:creator>Jianfeng Niu</dc:creator>
			<dc:creator>Chingleong Gan</dc:creator>
			<dc:creator>Xinyu May Teo</dc:creator>
			<dc:creator>Yaqi Xue</dc:creator>
			<dc:creator>Xiaojie Guo</dc:creator>
			<dc:creator>Wenlong Ma</dc:creator>
			<dc:creator>Haobai Li</dc:creator>
			<dc:creator>Zhikun Gao</dc:creator>
			<dc:creator>Zhiping Zeng</dc:creator>
		<dc:identifier>doi: 10.3390/s26092818</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2818</prism:startingPage>
		<prism:doi>10.3390/s26092818</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2818</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2817">

	<title>Sensors, Vol. 26, Pages 2817: Headset-Type Biofluorometric Gas Sensor with CMOS for Transcutaneous Ethanol from the Ear Canal</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2817</link>
	<description>This study presents a headset-type biofluorometric gas sensor incorporating a CMOS camera for continuous, non-invasive monitoring of transcutaneous ethanol from the ear canal. The sensor employs alcohol dehydrogenase (ADH) to catalyze the NAD+-to-NADH conversion during ethanol oxidation, enabling quantitative measurement through NADH fluorescence detection (λex = 340 nm, λem = 490 nm). The integrated system comprises a wireless CMOS camera, an ADH-immobilized cotton mesh enzyme membrane, a UV-LED excitation source, optical bandpass filters, and a dual convex lens assembly housed in a 3D-printed headset powered by a lithium battery. Key improvements include a 3.5-fold enhancement in fluorescence collection efficiency achieved through an optimized dual convex lens configuration. Systematic screening of seven cotton mesh materials identified Iwatsuki cotton mesh as the optimal enzyme immobilization substrate, exhibiting minimal autofluorescence and 14.2-fold higher water retention capacity compared to H-PTFE membranes. The glutaraldehyde-crosslinked ADH-immobilized cotton mesh maintained enzymatic activity for over 45 min with a 10-fold improvement in signal-to-noise ratio. The system demonstrated a dynamic detection range spanning 10 ppb to 10 ppm for gaseous ethanol and exhibited high selectivity against interfering volatile organic compounds in skin gas, including methanol, acetaldehyde, formaldehyde, and acetone. Human experiments validated the system’s practical performance. Following alcohol consumption, subjects wore the device for 50 min while real-time fluorescence monitoring captured dynamic ethanol concentration changes in the ear canal. The dose-dependent fluorescence response (approximately 2-fold higher at 0.4 g/kg versus 0.04 g/kg alcohol intake) correlated well with calibration data. This headset-type biofluorometric sensor enables unrestrained continuous monitoring of ear canal ethanol, providing a novel wearable platform for alcohol metabolism assessment with potential applications in health monitoring and clinical research.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2817: Headset-Type Biofluorometric Gas Sensor with CMOS for Transcutaneous Ethanol from the Ear Canal</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2817">doi: 10.3390/s26092817</a></p>
	<p>Authors:
		Geng Zhang
		Di Huang
		Kenta Ichikawa
		Kenta Iitani
		Yoshikazu Nakajima
		Kohji Mitsubayashi
		</p>
	<p>This study presents a headset-type biofluorometric gas sensor incorporating a CMOS camera for continuous, non-invasive monitoring of transcutaneous ethanol from the ear canal. The sensor employs alcohol dehydrogenase (ADH) to catalyze the NAD+-to-NADH conversion during ethanol oxidation, enabling quantitative measurement through NADH fluorescence detection (λex = 340 nm, λem = 490 nm). The integrated system comprises a wireless CMOS camera, an ADH-immobilized cotton mesh enzyme membrane, a UV-LED excitation source, optical bandpass filters, and a dual convex lens assembly housed in a 3D-printed headset powered by a lithium battery. Key improvements include a 3.5-fold enhancement in fluorescence collection efficiency achieved through an optimized dual convex lens configuration. Systematic screening of seven cotton mesh materials identified Iwatsuki cotton mesh as the optimal enzyme immobilization substrate, exhibiting minimal autofluorescence and 14.2-fold higher water retention capacity compared to H-PTFE membranes. The glutaraldehyde-crosslinked ADH-immobilized cotton mesh maintained enzymatic activity for over 45 min with a 10-fold improvement in signal-to-noise ratio. The system demonstrated a dynamic detection range spanning 10 ppb to 10 ppm for gaseous ethanol and exhibited high selectivity against interfering volatile organic compounds in skin gas, including methanol, acetaldehyde, formaldehyde, and acetone. Human experiments validated the system’s practical performance. Following alcohol consumption, subjects wore the device for 50 min while real-time fluorescence monitoring captured dynamic ethanol concentration changes in the ear canal. The dose-dependent fluorescence response (approximately 2-fold higher at 0.4 g/kg versus 0.04 g/kg alcohol intake) correlated well with calibration data. This headset-type biofluorometric sensor enables unrestrained continuous monitoring of ear canal ethanol, providing a novel wearable platform for alcohol metabolism assessment with potential applications in health monitoring and clinical research.</p>
	]]></content:encoded>

	<dc:title>Headset-Type Biofluorometric Gas Sensor with CMOS for Transcutaneous Ethanol from the Ear Canal</dc:title>
			<dc:creator>Geng Zhang</dc:creator>
			<dc:creator>Di Huang</dc:creator>
			<dc:creator>Kenta Ichikawa</dc:creator>
			<dc:creator>Kenta Iitani</dc:creator>
			<dc:creator>Yoshikazu Nakajima</dc:creator>
			<dc:creator>Kohji Mitsubayashi</dc:creator>
		<dc:identifier>doi: 10.3390/s26092817</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2817</prism:startingPage>
		<prism:doi>10.3390/s26092817</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2817</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2816">

	<title>Sensors, Vol. 26, Pages 2816: Experimental and DFT Investigation of a Vitamin B6-Derived Fluorescent Probe for Detection of Al3+ and Ga3+ Ions in a Buffered Aqueous DMSO Solution</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2816</link>
	<description>A new selective fluorescent probe based on a vitamin B6-derived hydrazone was synthesized and characterized for the detection of Al3+ and Ga3+ ions. The probe’s selectivity and sensitivity were evaluated using UV-Vis, fluorescence, and NMR spectroscopy in a buffered DMSO/water solution, complemented by density functional theory (DFT) calculations to elucidate the electronic structure and coordination modes of the resulting complexes. The probe exhibited a notable “turn-on” fluorescence response upon binding Al3+ and Ga3+, with emission maxima at 466 nm and 477 nm, respectively, and detection limits as low as 48 nM for Al3+ and 33 nM for Ga3+. The probe showed high selectivity for these ions over a wide range of competing cations and anions, forming stable 1:1 complexes with log β′ values of 5.98 for Al3+ and 6.28 for Ga3+. DFT calculations revealed a tridentate coordination mode via the phenolic oxygen, azomethine nitrogen, and carbonyl oxygen, with distinct electronic transitions for each complex, including a ligand-to-metal charge transfer character in the Ga3+ complex. The probe demonstrates reversibility and excellent solution stability, offering a simple and sensitive platform for the environmental and biological monitoring of aluminum(III) and gallium(III) ions.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2816: Experimental and DFT Investigation of a Vitamin B6-Derived Fluorescent Probe for Detection of Al3+ and Ga3+ Ions in a Buffered Aqueous DMSO Solution</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2816">doi: 10.3390/s26092816</a></p>
	<p>Authors:
		Maksim N. Zavalishin
		Artemiy A. Guschin
		George A. Gamov
		</p>
	<p>A new selective fluorescent probe based on a vitamin B6-derived hydrazone was synthesized and characterized for the detection of Al3+ and Ga3+ ions. The probe’s selectivity and sensitivity were evaluated using UV-Vis, fluorescence, and NMR spectroscopy in a buffered DMSO/water solution, complemented by density functional theory (DFT) calculations to elucidate the electronic structure and coordination modes of the resulting complexes. The probe exhibited a notable “turn-on” fluorescence response upon binding Al3+ and Ga3+, with emission maxima at 466 nm and 477 nm, respectively, and detection limits as low as 48 nM for Al3+ and 33 nM for Ga3+. The probe showed high selectivity for these ions over a wide range of competing cations and anions, forming stable 1:1 complexes with log β′ values of 5.98 for Al3+ and 6.28 for Ga3+. DFT calculations revealed a tridentate coordination mode via the phenolic oxygen, azomethine nitrogen, and carbonyl oxygen, with distinct electronic transitions for each complex, including a ligand-to-metal charge transfer character in the Ga3+ complex. The probe demonstrates reversibility and excellent solution stability, offering a simple and sensitive platform for the environmental and biological monitoring of aluminum(III) and gallium(III) ions.</p>
	]]></content:encoded>

	<dc:title>Experimental and DFT Investigation of a Vitamin B6-Derived Fluorescent Probe for Detection of Al3+ and Ga3+ Ions in a Buffered Aqueous DMSO Solution</dc:title>
			<dc:creator>Maksim N. Zavalishin</dc:creator>
			<dc:creator>Artemiy A. Guschin</dc:creator>
			<dc:creator>George A. Gamov</dc:creator>
		<dc:identifier>doi: 10.3390/s26092816</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2816</prism:startingPage>
		<prism:doi>10.3390/s26092816</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2816</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2814">

	<title>Sensors, Vol. 26, Pages 2814: A Flexible Wearable Electronics System for Electrocardiographic Assessment of Colchicine Therapy for Post-MI Remodeling</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2814</link>
	<description>Objective: Myocardial infarction (MI) triggers inflammation and fibrosis that drive the progressive impairment of cardiac function. Yet most pharmacological studies still depend on single-time-point histological or imaging endpoints and lack longitudinal, non-invasive assessments of treatment response. Electrocardiography (ECG) detects conduction and repolarization abnormalities tightly associated with myocardial injury and structural remodeling. However, ECG monitoring in mice is limited by rigid or invasive hardware, which restricts its use for longitudinal assessment of cardiac structure and function. Approach: Here, we propose an ECG-based non-invasive post-MI cardiac remodeling assessment approach and develop a flexible electrocardiographic monitoring microsystem (FECMS). Using therapy with the anti-remodeling drug colchicine in an MI mouse model (Sham n = 4, MI n = 7 survivors, Col n = 7 survivors) for validation, we longitudinally track drug-induced changes in ECG parameters and systematically evaluate their concordance with functional, structural, and molecular indicators of cardiac injury and remodeling. Results: Colchicine treatment induced progressive shortening of the QRS and QT intervals and gradual stabilization of the PR interval. These interval changes were accompanied by increased EF and FS, decreased LVESV, reduced myocardial fibrosis and inflammatory infiltration, and lower plasma troponin I levels at the endpoint. Correlation analyses revealed strong relationships between drug-induced changes in ECG parameters and functional recovery and inhibited structural remodeling. Significance: The FECMS provides a new, non-invasive tool for longitudinal cardiovascular drug evaluation. This approach has the potential to complement or reduce reliance on terminal histological endpoints and to facilitate the optimization of dosing strategies in preclinical cardiovascular pharmacology.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2814: A Flexible Wearable Electronics System for Electrocardiographic Assessment of Colchicine Therapy for Post-MI Remodeling</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2814">doi: 10.3390/s26092814</a></p>
	<p>Authors:
		Weijia Huang
		Xiangfeng Gong
		Maoshuai Yang
		Ting Huang
		Qiyao Zhuang
		Zhenghua Xiao
		Tao Xiong
		Gang Yang
		</p>
	<p>Objective: Myocardial infarction (MI) triggers inflammation and fibrosis that drive the progressive impairment of cardiac function. Yet most pharmacological studies still depend on single-time-point histological or imaging endpoints and lack longitudinal, non-invasive assessments of treatment response. Electrocardiography (ECG) detects conduction and repolarization abnormalities tightly associated with myocardial injury and structural remodeling. However, ECG monitoring in mice is limited by rigid or invasive hardware, which restricts its use for longitudinal assessment of cardiac structure and function. Approach: Here, we propose an ECG-based non-invasive post-MI cardiac remodeling assessment approach and develop a flexible electrocardiographic monitoring microsystem (FECMS). Using therapy with the anti-remodeling drug colchicine in an MI mouse model (Sham n = 4, MI n = 7 survivors, Col n = 7 survivors) for validation, we longitudinally track drug-induced changes in ECG parameters and systematically evaluate their concordance with functional, structural, and molecular indicators of cardiac injury and remodeling. Results: Colchicine treatment induced progressive shortening of the QRS and QT intervals and gradual stabilization of the PR interval. These interval changes were accompanied by increased EF and FS, decreased LVESV, reduced myocardial fibrosis and inflammatory infiltration, and lower plasma troponin I levels at the endpoint. Correlation analyses revealed strong relationships between drug-induced changes in ECG parameters and functional recovery and inhibited structural remodeling. Significance: The FECMS provides a new, non-invasive tool for longitudinal cardiovascular drug evaluation. This approach has the potential to complement or reduce reliance on terminal histological endpoints and to facilitate the optimization of dosing strategies in preclinical cardiovascular pharmacology.</p>
	]]></content:encoded>

	<dc:title>A Flexible Wearable Electronics System for Electrocardiographic Assessment of Colchicine Therapy for Post-MI Remodeling</dc:title>
			<dc:creator>Weijia Huang</dc:creator>
			<dc:creator>Xiangfeng Gong</dc:creator>
			<dc:creator>Maoshuai Yang</dc:creator>
			<dc:creator>Ting Huang</dc:creator>
			<dc:creator>Qiyao Zhuang</dc:creator>
			<dc:creator>Zhenghua Xiao</dc:creator>
			<dc:creator>Tao Xiong</dc:creator>
			<dc:creator>Gang Yang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092814</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2814</prism:startingPage>
		<prism:doi>10.3390/s26092814</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2814</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2813">

	<title>Sensors, Vol. 26, Pages 2813: Knowledge-Guided Modulation for Terrain-Aware Landslide Detection Using Deformable Transformers</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2813</link>
	<description>Landslide detection using medium-resolution optical remote sensing imagery remains challenging in complex mountainous environments because of spectral ambiguity, vegetation cover, shadows, and background interference. Although recent deep learning methods have improved detection performance, most existing approaches remain primarily appearance-driven and do not explicitly exploit terrain-related priors that are closely associated with slope instability. To address this limitation, we propose a terrain-aware deformable transformer framework for landslide detection using multimodal remote sensing data, in which RGB imagery, DEM, and slope are jointly incorporated through a unified five-channel representation, and a knowledge-guided modulation module is introduced to enhance feature learning using terrain priors derived from DEM and slope. Here, “knowledge-guided” refers specifically to explicit topographic priors rather than complete geological or hydrological knowledge. Experimental results on the Bijie landslide dataset show that the proposed method outperforms several competitive baselines and achieves 72.9% AP@[0.5:0.95] and 77.2% AP75, while improving localization robustness in visually confusing mountainous scenes. These results indicate that terrain-aware feature modulation can improve geomorphological plausibility and detection accuracy for landslide inventory mapping, although further cross-region validation is still needed to assess broader generalization.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2813: Knowledge-Guided Modulation for Terrain-Aware Landslide Detection Using Deformable Transformers</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2813">doi: 10.3390/s26092813</a></p>
	<p>Authors:
		Yan-Chang Jia
		Shu-Yan Hua
		Hong-Fei Wang
		Tong Jiang
		Qi-Qi Zhao
		</p>
	<p>Landslide detection using medium-resolution optical remote sensing imagery remains challenging in complex mountainous environments because of spectral ambiguity, vegetation cover, shadows, and background interference. Although recent deep learning methods have improved detection performance, most existing approaches remain primarily appearance-driven and do not explicitly exploit terrain-related priors that are closely associated with slope instability. To address this limitation, we propose a terrain-aware deformable transformer framework for landslide detection using multimodal remote sensing data, in which RGB imagery, DEM, and slope are jointly incorporated through a unified five-channel representation, and a knowledge-guided modulation module is introduced to enhance feature learning using terrain priors derived from DEM and slope. Here, “knowledge-guided” refers specifically to explicit topographic priors rather than complete geological or hydrological knowledge. Experimental results on the Bijie landslide dataset show that the proposed method outperforms several competitive baselines and achieves 72.9% AP@[0.5:0.95] and 77.2% AP75, while improving localization robustness in visually confusing mountainous scenes. These results indicate that terrain-aware feature modulation can improve geomorphological plausibility and detection accuracy for landslide inventory mapping, although further cross-region validation is still needed to assess broader generalization.</p>
	]]></content:encoded>

	<dc:title>Knowledge-Guided Modulation for Terrain-Aware Landslide Detection Using Deformable Transformers</dc:title>
			<dc:creator>Yan-Chang Jia</dc:creator>
			<dc:creator>Shu-Yan Hua</dc:creator>
			<dc:creator>Hong-Fei Wang</dc:creator>
			<dc:creator>Tong Jiang</dc:creator>
			<dc:creator>Qi-Qi Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/s26092813</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2813</prism:startingPage>
		<prism:doi>10.3390/s26092813</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2813</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2815">

	<title>Sensors, Vol. 26, Pages 2815: Computational Simulation of a Surface Plasmonic Resonance Biosensor for β2-Microglobulin Based on Electrolyte-Gated Graphene</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2815</link>
	<description>Biosensors have emerged as a rapidly evolving area of research, offering transformative potential across biomedical diagnostics, environmental monitoring, and pharmaceutical applications. Among the diverse range of biosensing technologies, graphene-based surface plasmonic resonance (SPR) biosensors have attracted particular interest due to their exceptional sensitivity, scalability for mass production, and cost-effective fabrication processes. This study explores the operational principles and current design methodologies of graphene-based SPR biosensors, with a special emphasis on the role of electrolyte gating and its impact on sensor performance. Furthermore, the influence of graphene’s quantum capacitance is investigated as a critical parameter for improving the accuracy and reliability of performance predictions in the proposed sensor configuration. Computational analysis of sensitivity and key performance metrics was conducted. Notably, key performance metrics of the sensor improved upon incorporating quantum capacitance effects into the simulation framework. At a β2-microglobulin concentration of 0.00118 g/L, the sensitivity increased to 174 GHz·g/L, the figure of merit reached 0.55 L/g, the quality factor was 0.01, the signal-to-noise ratio (SNR) rose to 0.008, and the detection accuracy (DA) reached 0.08 L/THz, demonstrating the significant impact of quantum capacitance on the sensor’s performance. These findings highlight the potential of quantum-electrostatic considerations to enhance the precision and efficacy of graphene-based SPR biosensors, paving the way for the development of next-generation biosensing platforms with improved analytical capabilities. Unlike conventional graphene SPR biosensors, which primarily detect refractive index changes near the graphene surface, our model explicitly considers the electrostatic effect of biomolecules on graphene’s Fermi energy. By modelling β2-microglobulin as a charged species, we compute the resulting electric double layer and incorporate quantum capacitance in series. This amplifies the charge-induced modulation of graphene’s optical conductivity, and, combined with a graphene perfect absorber design, leads to enhanced plasmonic resonance shifts. Consequently, our approach achieves higher sensitivity and more precise detection of biomolecular interactions compared to traditional simulations.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2815: Computational Simulation of a Surface Plasmonic Resonance Biosensor for β2-Microglobulin Based on Electrolyte-Gated Graphene</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2815">doi: 10.3390/s26092815</a></p>
	<p>Authors:
		Ghassem Baridi
		Arslan Liaquat
		Leonardo Martini
		Federico Rapuzzi
		Vito Clericò
		Mario Amado
		Enrique Diez
		El Hadj Abidi
		Maria Celeste Maschio
		Stefano Corni
		Yahya Moubarak Meziani
		Giorgia Brancolini
		Francesco Rossella
		Luigi Rovati
		</p>
	<p>Biosensors have emerged as a rapidly evolving area of research, offering transformative potential across biomedical diagnostics, environmental monitoring, and pharmaceutical applications. Among the diverse range of biosensing technologies, graphene-based surface plasmonic resonance (SPR) biosensors have attracted particular interest due to their exceptional sensitivity, scalability for mass production, and cost-effective fabrication processes. This study explores the operational principles and current design methodologies of graphene-based SPR biosensors, with a special emphasis on the role of electrolyte gating and its impact on sensor performance. Furthermore, the influence of graphene’s quantum capacitance is investigated as a critical parameter for improving the accuracy and reliability of performance predictions in the proposed sensor configuration. Computational analysis of sensitivity and key performance metrics was conducted. Notably, key performance metrics of the sensor improved upon incorporating quantum capacitance effects into the simulation framework. At a β2-microglobulin concentration of 0.00118 g/L, the sensitivity increased to 174 GHz·g/L, the figure of merit reached 0.55 L/g, the quality factor was 0.01, the signal-to-noise ratio (SNR) rose to 0.008, and the detection accuracy (DA) reached 0.08 L/THz, demonstrating the significant impact of quantum capacitance on the sensor’s performance. These findings highlight the potential of quantum-electrostatic considerations to enhance the precision and efficacy of graphene-based SPR biosensors, paving the way for the development of next-generation biosensing platforms with improved analytical capabilities. Unlike conventional graphene SPR biosensors, which primarily detect refractive index changes near the graphene surface, our model explicitly considers the electrostatic effect of biomolecules on graphene’s Fermi energy. By modelling β2-microglobulin as a charged species, we compute the resulting electric double layer and incorporate quantum capacitance in series. This amplifies the charge-induced modulation of graphene’s optical conductivity, and, combined with a graphene perfect absorber design, leads to enhanced plasmonic resonance shifts. Consequently, our approach achieves higher sensitivity and more precise detection of biomolecular interactions compared to traditional simulations.</p>
	]]></content:encoded>

	<dc:title>Computational Simulation of a Surface Plasmonic Resonance Biosensor for β2-Microglobulin Based on Electrolyte-Gated Graphene</dc:title>
			<dc:creator>Ghassem Baridi</dc:creator>
			<dc:creator>Arslan Liaquat</dc:creator>
			<dc:creator>Leonardo Martini</dc:creator>
			<dc:creator>Federico Rapuzzi</dc:creator>
			<dc:creator>Vito Clericò</dc:creator>
			<dc:creator>Mario Amado</dc:creator>
			<dc:creator>Enrique Diez</dc:creator>
			<dc:creator>El Hadj Abidi</dc:creator>
			<dc:creator>Maria Celeste Maschio</dc:creator>
			<dc:creator>Stefano Corni</dc:creator>
			<dc:creator>Yahya Moubarak Meziani</dc:creator>
			<dc:creator>Giorgia Brancolini</dc:creator>
			<dc:creator>Francesco Rossella</dc:creator>
			<dc:creator>Luigi Rovati</dc:creator>
		<dc:identifier>doi: 10.3390/s26092815</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2815</prism:startingPage>
		<prism:doi>10.3390/s26092815</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2815</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2812">

	<title>Sensors, Vol. 26, Pages 2812: L-SAINet: A Shape-Adaptive and Inner-Scale Interaction Network for Landslide Detection in Complex Remote Sensing Scenarios</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2812</link>
	<description>Landslides are widespread geohazards in mountainous regions and pose serious threats to human safety, infrastructure, and ecosystems. Accurate detection from high-resolution optical remote sensing imagery remains challenging because landslide targets often exhibit irregular morphology, large scale variation, weak boundaries, and strong background interference. To address these issues, this study proposes L-SAINet, a shape-adaptive and inner-scale interaction network for landslide detection in complex remote sensing scenarios. Built on a lightweight one-stage detection framework, the proposed method introduces an L-SAI module that integrates adaptive deformable convolution, channel–spatial attention, and inner-scale feature interaction. The shape-adaptive branch improves geometric alignment for irregular and elongated landslide bodies, while the attention branch enhances semantic discrimination under heterogeneous background conditions. The two branches are further fused at the same feature scale to construct a more unified landslide representation. Experiments on the Bijie Landslide Remote Sensing Dataset show that L-SAINet consistently outperforms the baseline detector and single-branch variants in Precision, Recall, mAP@0.5, and mAP@0.5:0.95. Additional analyses based on precision–recall curves, confusion matrices, convergence behavior, model complexity, and representative complex-scene examples further confirm its effectiveness and robustness. The results demonstrate that jointly modeling geometric adaptability and semantic refinement is an effective strategy for landslide detection in complex mountain environments.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2812: L-SAINet: A Shape-Adaptive and Inner-Scale Interaction Network for Landslide Detection in Complex Remote Sensing Scenarios</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2812">doi: 10.3390/s26092812</a></p>
	<p>Authors:
		Yanchang Jia
		Shuyan Hua
		Hongfei Wang
		Tong Jiang
		Qiqi Zhao
		</p>
	<p>Landslides are widespread geohazards in mountainous regions and pose serious threats to human safety, infrastructure, and ecosystems. Accurate detection from high-resolution optical remote sensing imagery remains challenging because landslide targets often exhibit irregular morphology, large scale variation, weak boundaries, and strong background interference. To address these issues, this study proposes L-SAINet, a shape-adaptive and inner-scale interaction network for landslide detection in complex remote sensing scenarios. Built on a lightweight one-stage detection framework, the proposed method introduces an L-SAI module that integrates adaptive deformable convolution, channel–spatial attention, and inner-scale feature interaction. The shape-adaptive branch improves geometric alignment for irregular and elongated landslide bodies, while the attention branch enhances semantic discrimination under heterogeneous background conditions. The two branches are further fused at the same feature scale to construct a more unified landslide representation. Experiments on the Bijie Landslide Remote Sensing Dataset show that L-SAINet consistently outperforms the baseline detector and single-branch variants in Precision, Recall, mAP@0.5, and mAP@0.5:0.95. Additional analyses based on precision–recall curves, confusion matrices, convergence behavior, model complexity, and representative complex-scene examples further confirm its effectiveness and robustness. The results demonstrate that jointly modeling geometric adaptability and semantic refinement is an effective strategy for landslide detection in complex mountain environments.</p>
	]]></content:encoded>

	<dc:title>L-SAINet: A Shape-Adaptive and Inner-Scale Interaction Network for Landslide Detection in Complex Remote Sensing Scenarios</dc:title>
			<dc:creator>Yanchang Jia</dc:creator>
			<dc:creator>Shuyan Hua</dc:creator>
			<dc:creator>Hongfei Wang</dc:creator>
			<dc:creator>Tong Jiang</dc:creator>
			<dc:creator>Qiqi Zhao</dc:creator>
		<dc:identifier>doi: 10.3390/s26092812</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2812</prism:startingPage>
		<prism:doi>10.3390/s26092812</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2812</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2810">

	<title>Sensors, Vol. 26, Pages 2810: Development and Validation of Accelerometer-Based Machine Learning Models for Classifying Walking, Running, and Jumping Activities</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2810</link>
	<description>Quantifying mechanical loading during daily physical activities is essential for designing and evaluating bone health interventions. Accelerometers are a promising tool for estimating these loads under free-living conditions, yet existing prediction models depend on prior knowledge of the activity being performed. This study developed and validated machine learning models to automatically distinguish between walking, running, and jumping using accelerometer data. Forty-eight healthy adults completed a protocol of walking, running, and jumping tasks while wearing ActiGraph GT9X Link accelerometers at the ankle, lower back, and hip. Three algorithms (Random Forest, Support Vector Machine, and K-Nearest Neighbors) were trained and evaluated through multiple performance metrics. All models achieved excellent classification accuracy across sensor placements, with percent agreement between 93.8% and 97.7%, receiver operating characteristic area under the curve values consistently above 0.97, and Kappa coefficients exceeding 0.89. These results demonstrate that accelerometer-based activity classification can reliably differentiate walking, running, and jumping, establishing a practical framework for applying activity-specific mechanical loading prediction equations under free-living conditions.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2810: Development and Validation of Accelerometer-Based Machine Learning Models for Classifying Walking, Running, and Jumping Activities</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2810">doi: 10.3390/s26092810</a></p>
	<p>Authors:
		Lucas Veras
		Florêncio Diniz-Sousa
		Giorjines Boppre
		Ana Resende-Coelho
		José Oliveira
		Hélder Fonseca
		</p>
	<p>Quantifying mechanical loading during daily physical activities is essential for designing and evaluating bone health interventions. Accelerometers are a promising tool for estimating these loads under free-living conditions, yet existing prediction models depend on prior knowledge of the activity being performed. This study developed and validated machine learning models to automatically distinguish between walking, running, and jumping using accelerometer data. Forty-eight healthy adults completed a protocol of walking, running, and jumping tasks while wearing ActiGraph GT9X Link accelerometers at the ankle, lower back, and hip. Three algorithms (Random Forest, Support Vector Machine, and K-Nearest Neighbors) were trained and evaluated through multiple performance metrics. All models achieved excellent classification accuracy across sensor placements, with percent agreement between 93.8% and 97.7%, receiver operating characteristic area under the curve values consistently above 0.97, and Kappa coefficients exceeding 0.89. These results demonstrate that accelerometer-based activity classification can reliably differentiate walking, running, and jumping, establishing a practical framework for applying activity-specific mechanical loading prediction equations under free-living conditions.</p>
	]]></content:encoded>

	<dc:title>Development and Validation of Accelerometer-Based Machine Learning Models for Classifying Walking, Running, and Jumping Activities</dc:title>
			<dc:creator>Lucas Veras</dc:creator>
			<dc:creator>Florêncio Diniz-Sousa</dc:creator>
			<dc:creator>Giorjines Boppre</dc:creator>
			<dc:creator>Ana Resende-Coelho</dc:creator>
			<dc:creator>José Oliveira</dc:creator>
			<dc:creator>Hélder Fonseca</dc:creator>
		<dc:identifier>doi: 10.3390/s26092810</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2810</prism:startingPage>
		<prism:doi>10.3390/s26092810</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2810</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2811">

	<title>Sensors, Vol. 26, Pages 2811: Time Synchronization Attack Detection Method Based on Carrier Doppler Pearson Correlation Coefficient Estimation</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2811</link>
	<description>The global navigation satellite system (GNSS), the main time synchronization method for phasor measurement units (PMUs) in smart grids, is highly vulnerable to time synchronization attacks (TSAs). This affects the timing of results and poses a serious threat to the safe and stable operation of power systems. To quickly detect TSAs and minimize the impact of time errors on PMU sensor networks, a TSA detection method based on carrier Doppler Pearson correlation coefficient estimation is proposed. This method can be directly implemented on existing commercial receivers without modifications. The method leverages the fact that carrier Doppler shifts in each satellite channel exhibit consistent changes when subjected to a TSA; therefore, if there is a correlation between channels, a consistent change in carrier Doppler shift caused by the TSA can be quickly detected through Pearson correlation coefficient estimation. In the TSA detection experiment, the proposed method was compared against four existing TSA detection methods on a self-developed experimental platform. The experimental results show that compared with the other four methods, the proposed method responds 4–22 s faster and has better detection speed, with more significant changes in the detection statistics. Notably, these advantages become more pronounced as the spoofing speed decreases and the spoofing stealthiness increases, indicating that this method has robust detection capability against sophisticated attacks. Meanwhile, it offers a lightweight computational overhead suitable for embedded PMU implementations, enhancing sensor-layer security in critical infrastructure. This work provides reliable synchronized measurements for power system monitoring and control over a wide area.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2811: Time Synchronization Attack Detection Method Based on Carrier Doppler Pearson Correlation Coefficient Estimation</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2811">doi: 10.3390/s26092811</a></p>
	<p>Authors:
		Lifen Li
		Zhiyun Xiao
		</p>
	<p>The global navigation satellite system (GNSS), the main time synchronization method for phasor measurement units (PMUs) in smart grids, is highly vulnerable to time synchronization attacks (TSAs). This affects the timing of results and poses a serious threat to the safe and stable operation of power systems. To quickly detect TSAs and minimize the impact of time errors on PMU sensor networks, a TSA detection method based on carrier Doppler Pearson correlation coefficient estimation is proposed. This method can be directly implemented on existing commercial receivers without modifications. The method leverages the fact that carrier Doppler shifts in each satellite channel exhibit consistent changes when subjected to a TSA; therefore, if there is a correlation between channels, a consistent change in carrier Doppler shift caused by the TSA can be quickly detected through Pearson correlation coefficient estimation. In the TSA detection experiment, the proposed method was compared against four existing TSA detection methods on a self-developed experimental platform. The experimental results show that compared with the other four methods, the proposed method responds 4–22 s faster and has better detection speed, with more significant changes in the detection statistics. Notably, these advantages become more pronounced as the spoofing speed decreases and the spoofing stealthiness increases, indicating that this method has robust detection capability against sophisticated attacks. Meanwhile, it offers a lightweight computational overhead suitable for embedded PMU implementations, enhancing sensor-layer security in critical infrastructure. This work provides reliable synchronized measurements for power system monitoring and control over a wide area.</p>
	]]></content:encoded>

	<dc:title>Time Synchronization Attack Detection Method Based on Carrier Doppler Pearson Correlation Coefficient Estimation</dc:title>
			<dc:creator>Lifen Li</dc:creator>
			<dc:creator>Zhiyun Xiao</dc:creator>
		<dc:identifier>doi: 10.3390/s26092811</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2811</prism:startingPage>
		<prism:doi>10.3390/s26092811</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2811</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2807">

	<title>Sensors, Vol. 26, Pages 2807: Embedded Wireless Flexible Sensor for Monitoring Interface Stress of Solid Rocket Motor</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2807</link>
	<description>The solid rocket motor (SRM) is a reliable and cost-effective aerospace propulsion system by virtue of its simple structure, long storage life, low cost, and ease of manufacturing. However, cracks and interfacial delamination may occur at the interface owing to the interface stress resulting from the complex service scenarios throughout the entire life cycle of the SRM. Therefore, it is crucial to monitor the interface stress for health assessment of the SRM. To achieve non-destructive in situ monitoring of interface stress, this paper proposes a novel embedded wireless flexible sensor (EWFS). Through theoretical analysis, the expression of the relationship between the input and output signals of the EWFS is formulated. The response patterns of the output signals under different interface stresses are investigated. A prototype of the EWFS comprising a flexible printed circuit board (FPCB) and polydimethylsiloxane (PDMS) is fabricated, and an interface stress-testing system is established for experiments. The experimental results indicate that the EWFS exhibits a sensitivity of 27.2 mV·MPa⁻¹, a linearity error of 1.73%, a maximum hysteresis error of 2.67%, and a stability error of 0.023%.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2807: Embedded Wireless Flexible Sensor for Monitoring Interface Stress of Solid Rocket Motor</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2807">doi: 10.3390/s26092807</a></p>
	<p>Authors:
		Bei Yan
		Xiaozhou Lü
		Kecai Ding
		Yipeng Heng
		Yong Li
		</p>
	<p>The solid rocket motor (SRM) is a reliable and cost-effective aerospace propulsion system by virtue of its simple structure, long storage life, low cost, and ease of manufacturing. However, cracks and interfacial delamination may occur at the interface owing to the interface stress resulting from the complex service scenarios throughout the entire life cycle of the SRM. Therefore, it is crucial to monitor the interface stress for health assessment of the SRM. To achieve non-destructive in situ monitoring of interface stress, this paper proposes a novel embedded wireless flexible sensor (EWFS). Through theoretical analysis, the expression of the relationship between the input and output signals of the EWFS is formulated. The response patterns of the output signals under different interface stresses are investigated. A prototype of the EWFS comprising a flexible printed circuit board (FPCB) and polydimethylsiloxane (PDMS) is fabricated, and an interface stress-testing system is established for experiments. The experimental results indicate that the EWFS exhibits a sensitivity of 27.2 mV·MPa⁻¹, a linearity error of 1.73%, a maximum hysteresis error of 2.67%, and a stability error of 0.023%.</p>
	]]></content:encoded>

	<dc:title>Embedded Wireless Flexible Sensor for Monitoring Interface Stress of Solid Rocket Motor</dc:title>
			<dc:creator>Bei Yan</dc:creator>
			<dc:creator>Xiaozhou Lü</dc:creator>
			<dc:creator>Kecai Ding</dc:creator>
			<dc:creator>Yipeng Heng</dc:creator>
			<dc:creator>Yong Li</dc:creator>
		<dc:identifier>doi: 10.3390/s26092807</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2807</prism:startingPage>
		<prism:doi>10.3390/s26092807</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2807</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2806">

	<title>Sensors, Vol. 26, Pages 2806: Wirelessly Interrogated, Implantable Capacitive MEMS Sensors for Continuous Intraocular Pressure Monitoring</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2806</link>
	<description>This work presents wirelessly interrogated microelectromechanical system (MEMS) capacitive sensors for continuous intraocular pressure (IOP) monitoring. The sensor uses a passive inductor–capacitor (LC) tank circuit comprising a fixed, on-chip spiral inductor and a pressure-sensitive, variable-gap capacitor with parallel-plate membrane electrodes and side anchors. The membrane is designed with dimensions of 500 µm × 500 µm × 2 µm and a capacitive transducer gap of 2.5 µm. Applied pressure deflects the top membrane, producing a corresponding capacitance variation that changes the frequency and phase response of the LC tank circuit, enabling real-time and continuous IOP monitoring over a target detection range of 0–50 mmHg and beyond. Mutual inductive coupling between the sensor and the external readout coil is investigated as a reliable readout mechanism.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2806: Wirelessly Interrogated, Implantable Capacitive MEMS Sensors for Continuous Intraocular Pressure Monitoring</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2806">doi: 10.3390/s26092806</a></p>
	<p>Authors:
		Liguan Li
		Adnan Zaman
		Ramesh Ayyala
		Jing Wang
		</p>
	<p>This work presents wirelessly interrogated microelectromechanical system (MEMS) capacitive sensors for continuous intraocular pressure (IOP) monitoring. The sensor uses a passive inductor–capacitor (LC) tank circuit comprising a fixed, on-chip spiral inductor and a pressure-sensitive, variable-gap capacitor with parallel-plate membrane electrodes and side anchors. The membrane is designed with dimensions of 500 µm × 500 µm × 2 µm and a capacitive transducer gap of 2.5 µm. Applied pressure deflects the top membrane, producing a corresponding capacitance variation that changes the frequency and phase response of the LC tank circuit, enabling real-time and continuous IOP monitoring over a target detection range of 0–50 mmHg and beyond. Mutual inductive coupling between the sensor and the external readout coil is investigated as a reliable readout mechanism.</p>
	]]></content:encoded>

	<dc:title>Wirelessly Interrogated, Implantable Capacitive MEMS Sensors for Continuous Intraocular Pressure Monitoring</dc:title>
			<dc:creator>Liguan Li</dc:creator>
			<dc:creator>Adnan Zaman</dc:creator>
			<dc:creator>Ramesh Ayyala</dc:creator>
			<dc:creator>Jing Wang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092806</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2806</prism:startingPage>
		<prism:doi>10.3390/s26092806</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2806</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2808">

	<title>Sensors, Vol. 26, Pages 2808: Parameter Adaptive Network for Large-Scale Neural In-Loop Filtering in Versatile Video Coding</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2808</link>
	<description>Efficient in-loop filtering is critical for the latest video-coding standard, Versatile Video Coding (H.266/VVC). While parameter-adaptive mechanisms are effective in traditional adaptive loop filters and overfitted small-scale neural in-loop filtering, they are difficult to deploy in large-scale models, since both schemes require explicit parameter signaling within the bitstream, leading to prohibitive overhead. Existing parameter-generation networks avoid these transmission costs but introduce an excessive number of parameters. To address this, we propose modeling convolutional parameters as a linear combination of pre-trained kernels, where weights are adaptively estimated via input-driven attention. Specifically, we propose a multi-scale parameter-adaptive convolution and its extension with side information, enabling parameter adaptation without transmission overhead or significant computational costs. Furthermore, we have designed an efficient parameter-adaptive in-loop filtering network with the proposed convolutions to balance parameter efficiency and reconstruction performance. To improve the distortion guidance provided by side information, we have incorporated gradient information. Experiments on VTM-11.0 demonstrate {7.89%, 18.25%, and 19.15%} bitrate savings for {Y, U, and V} components, outperforming fixed-parameter baselines by an average of 1.41% with negligible computational overhead.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2808: Parameter Adaptive Network for Large-Scale Neural In-Loop Filtering in Versatile Video Coding</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2808">doi: 10.3390/s26092808</a></p>
	<p>Authors:
		Yuansheng Wu
		Fan Cai
		Xiaodan Song
		Xuguang Zuo
		</p>
	<p>Efficient in-loop filtering is critical for the latest video-coding standard, versatile video coding (H.266/VVC). While parameter-adaptive mechanisms are effective in traditional adaptive loop filters and in overfitted small-scale neural in-loop filtering, they are difficult to deploy in large-scale models, since both schemes require explicit parameter signaling within the bitstream, leading to prohibitive overhead. Existing parameter-generation networks avoid these transmission costs but introduce an excessive number of parameters. To address this, we propose modeling convolutional parameters as a linear combination of pre-trained kernels, whose weights are adaptively estimated via input-driven attention. Specifically, we propose a multi-scale parameter-adaptive convolution and its side-information-driven extension, enabling parameter adaptation without transmission overhead or significant computational cost. Furthermore, we design an efficient parameter-adaptive in-loop filtering network with the proposed convolutions to balance parameter efficiency and reconstruction performance. To improve the distortion guidance provided by the side information, we incorporate gradient information. Experiments on VTM-11.0 demonstrate bitrate savings of 7.89%, 18.25%, and 19.15% for the Y, U, and V components, outperforming fixed-parameter baselines by an average of 1.41% with negligible computational overhead.</p>
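	<p>A minimal Python sketch of the core idea above: the effective convolution kernel is a linear combination of a pre-trained kernel bank, with combination weights estimated from the input itself, so no kernel weights need to be signaled in the bitstream. The pooled-statistics "attention" and all shapes below are illustrative assumptions, not the paper's network.</p>
	<pre><code>
# Sketch only: kernel bank, attention stand-in, and shapes are assumptions.
import numpy as np

rng = np.random.default_rng(0)
N_KERNELS, KSIZE = 4, 3
bank = rng.standard_normal((N_KERNELS, KSIZE, KSIZE))   # pre-trained kernel bank

def adaptive_kernel(x):
    """Input-driven weights via a softmax over simple pooled statistics."""
    stats = np.array([x.mean(), x.std(), np.abs(x).mean(), x.max()])
    w = np.exp(stats - stats.max())
    w /= w.sum()                           # attention-style normalization
    return np.tensordot(w, bank, axes=1)   # weighted sum of pre-trained kernels

def conv2d_valid(x, k):
    hh, ww = x.shape[0] - KSIZE + 1, x.shape[1] - KSIZE + 1
    out = np.zeros((hh, ww))
    for i in range(hh):
        for j in range(ww):
            out[i, j] = np.sum(x[i:i + KSIZE, j:j + KSIZE] * k)
    return out

x = rng.standard_normal((16, 16))          # toy reconstructed block
y = conv2d_valid(x, adaptive_kernel(x))    # no kernel weights in the bitstream
print(y.shape)                             # (14, 14)
</code></pre>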
	]]></content:encoded>

	<dc:title>Parameter Adaptive Network for Large-Scale Neural In-Loop Filtering in Versatile Video Coding</dc:title>
			<dc:creator>Yuansheng Wu</dc:creator>
			<dc:creator>Fan Cai</dc:creator>
			<dc:creator>Xiaodan Song</dc:creator>
			<dc:creator>Xuguang Zuo</dc:creator>
		<dc:identifier>doi: 10.3390/s26092808</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2808</prism:startingPage>
		<prism:doi>10.3390/s26092808</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2808</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2809">

	<title>Sensors, Vol. 26, Pages 2809: Optimization of Camera and Radar Placement for Sensor Fusion and Ball Tracking in Sports</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2809</link>
	<description>The placement of sensors in an environment can significantly impact the sensing performance of a sensor fusion system. In this paper, the placement of cameras and radars is optimized based on the log determinant of the fused measurement noise covariance. This is achieved by mapping the measurements into 3D Cartesian space and applying covariance intersection to obtain a final measurement distribution, which is taken as the measurement noise. The method was tested over random initial placements and repeated optimization runs for a sensor system intended for ball tracking in sports. The particular use case involves tracking a cricket ball for match evaluation and assisted umpiring. However, in principle, the method is applicable to any sensor placement problem in which the objective is localization and tracking. The results indicate a lower root mean squared error for the optimized sensor placements, which in turn implies a reduction in the measurement noise covariance.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2809: Optimization of Camera and Radar Placement for Sensor Fusion and Ball Tracking in Sports</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2809">doi: 10.3390/s26092809</a></p>
	<p>Authors:
		Dylan Kamstra
		Johan Pieter de Villiers
		</p>
	<p>The placement of sensors in an environment can significantly impact the sensing performance of a sensor fusion system. In this paper, the placement of cameras and radars is optimized based on the log determinant of the fused measurement noise covariance. This is achieved by mapping the measurements into 3D Cartesian space and applying covariance intersection to obtain a final measurement distribution, which is taken as the measurement noise. The method was tested over random initial placements and repeated optimization runs for a sensor system intended for ball tracking in sports. The particular use case involves tracking a cricket ball for match evaluation and assisted umpiring. However, in principle, the method is applicable to any sensor placement problem in which the objective is localization and tracking. The results indicate a lower root mean squared error for the optimized sensor placements, which in turn implies a reduction in the measurement noise covariance.</p>
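	<p>A minimal Python sketch of the objective described above: per-sensor measurement covariances, already mapped into 3D Cartesian space, are fused with covariance intersection, and a placement is scored by the log determinant of the fused covariance (lower is better). The covariance values and the weight grid are toy assumptions.</p>
	<pre><code>
# Sketch only: toy covariances; the CI weight is found by a coarse grid search.
import numpy as np

def covariance_intersection(P1, P2, n_grid=99):
    """CI: inv(Pf) = w*inv(P1) + (1-w)*inv(P2), w chosen to minimize log det Pf."""
    best = None
    for w in np.linspace(0.01, 0.99, n_grid):
        Pf = np.linalg.inv(w * np.linalg.inv(P1) + (1 - w) * np.linalg.inv(P2))
        score = np.linalg.slogdet(Pf)[1]        # log determinant
        if best is None or best[0] > score:
            best = (score, Pf)
    return best

# Toy 3D covariances already mapped to Cartesian space: a camera (good
# angular, poor depth) and a radar (good range, poorer angular) view.
P_cam = np.diag([0.02, 0.02, 0.50])
P_rad = np.diag([0.30, 0.30, 0.05])
logdet, P_fused = covariance_intersection(P_cam, P_rad)
print("placement score (lower is better):", logdet)
</code></pre>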
	]]></content:encoded>

	<dc:title>Optimization of Camera and Radar Placement for Sensor Fusion and Ball Tracking in Sports</dc:title>
			<dc:creator>Dylan Kamstra</dc:creator>
			<dc:creator>Johan Pieter de Villiers</dc:creator>
		<dc:identifier>doi: 10.3390/s26092809</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2809</prism:startingPage>
		<prism:doi>10.3390/s26092809</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2809</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2802">

	<title>Sensors, Vol. 26, Pages 2802: From Geometric Exploration to Semantic Completion: Scene Exploration Convolution and Large Format Perception for Adverse-Weather UAV Aerial Object Detection</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2802</link>
	<description>Object detection from unmanned aerial vehicle (UAV) imagery is essential for applications such as traffic monitoring, disaster response, and urban surveillance, yet most existing methods are developed and evaluated under clear-sky conditions. In real-world UAV operations, adverse weather, including fog, rain, and snow, introduces severe image degradation that simultaneously disrupts both the geometric and photometric properties of targets. This paper identifies two fundamental bottlenecks underlying this performance collapse: the lack of geometric invariance in standard convolutional operators and the inability of fixed receptive fields to reconstruct features corrupted by atmospheric interference. To address these bottlenecks, we propose SELPNet (Scene Exploration and Large Format Perception Network), a unified framework that integrates geometric alignment and multi-scale contextual perception into the YOLOv13 head. SELPNet consists of two key modules: (1) The Scene Exploration Convolution (SEC) leverages affine Lie group theory to construct a discrete manifold of rotation and scale transformations, actively probing multiple geometric views and selecting the most coherent response via a Maxout mechanism. (2) The Large Format Perception Module (LPM) introduces a dynamic dilation strategy with depthwise separable convolutions, progressively enlarging the receptive field from fine-grained edge preservation to scene-level contextual perception for semantic completion of degraded regions. We further construct and release AWU-OBB, a large-scale benchmark containing over 18,000 UAV images with oriented-bounding-box annotations across four representative scene categories. Ablation experiments demonstrate that SEC and LPM yield complementary gains, achieving a combined improvement of +4.26% mAP50 over the YOLOv13-n baseline with only 0.11 M additional parameters and 0.2 extra GFLOPs. The source code will be publicly released upon acceptance of this paper.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2802: From Geometric Exploration to Semantic Completion: Scene Exploration Convolution and Large Format Perception for Adverse-Weather UAV Aerial Object Detection</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2802">doi: 10.3390/s26092802</a></p>
	<p>Authors:
		Yize Zhao
		Bo Wang
		Jialei Zhan
		</p>
	<p>Object detection from unmanned aerial vehicle (UAV) imagery is essential for applications such as traffic monitoring, disaster response, and urban surveillance, yet most existing methods are developed and evaluated under clear-sky conditions. In real-world UAV operations, adverse weather, including fog, rain, and snow, introduces severe image degradation that simultaneously disrupts both the geometric and photometric properties of targets. This paper identifies two fundamental bottlenecks underlying this performance collapse: the lack of geometric invariance in standard convolutional operators and the inability of fixed receptive fields to reconstruct features corrupted by atmospheric interference. To address these bottlenecks, we propose SELPNet (Scene Exploration and Large Format Perception Network), a unified framework that integrates geometric alignment and multi-scale contextual perception into the YOLOv13 head. SELPNet consists of two key modules: (1) The Scene Exploration Convolution (SEC) leverages affine Lie group theory to construct a discrete manifold of rotation and scale transformations, actively probing multiple geometric views and selecting the most coherent response via a Maxout mechanism. (2) The Large Format Perception Module (LPM) introduces a dynamic dilation strategy with depthwise separable convolutions, progressively enlarging the receptive field from fine-grained edge preservation to scene-level contextual perception for semantic completion of degraded regions. We further construct and release AWU-OBB, a large-scale benchmark containing over 18,000 UAV images with oriented-bounding-box annotations across four representative scene categories. Ablation experiments demonstrate that SEC and LPM yield complementary gains, achieving a combined improvement of +4.26% mAP50 over the YOLOv13-n baseline with only 0.11 M additional parameters and 0.2 extra GFLOPs. The source code will be publicly released upon acceptance of this paper.</p>
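	<p>A minimal Python sketch of the SEC idea: probe a small discrete grid of rotation and scale views of a patch and keep the strongest (Maxout) filter response. The filter, the transform grid, and the scalar coherence proxy are illustrative assumptions; the paper operates on full feature maps inside the YOLOv13 head.</p>
	<pre><code>
# Sketch only: filter, rotation/scale grid, and coherence proxy are assumed.
import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
kernel = rng.standard_normal((3, 3))
ANGLES = (0.0, 15.0, -15.0)    # discrete rotation samples, degrees
SCALES = (0.8, 1.0, 1.25)      # discrete scale samples

def sec_response(patch):
    """Probe each geometric view; Maxout keeps the most coherent response."""
    responses = []
    for ang in ANGLES:
        for s in SCALES:
            view = ndimage.rotate(patch, ang, reshape=False, mode="nearest")
            view = ndimage.zoom(view, s, mode="nearest")
            r = ndimage.convolve(view, kernel, mode="nearest")
            responses.append(np.abs(r).mean())   # scalar coherence proxy
    return max(responses)                        # Maxout over the view manifold

patch = rng.standard_normal((32, 32))            # toy degraded feature patch
print(sec_response(patch))
</code></pre>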
	]]></content:encoded>

	<dc:title>From Geometric Exploration to Semantic Completion: Scene Exploration Convolution and Large Format Perception for Adverse-Weather UAV Aerial Object Detection</dc:title>
			<dc:creator>Yize Zhao</dc:creator>
			<dc:creator>Bo Wang</dc:creator>
			<dc:creator>Jialei Zhan</dc:creator>
		<dc:identifier>doi: 10.3390/s26092802</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2802</prism:startingPage>
		<prism:doi>10.3390/s26092802</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2802</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2805">

	<title>Sensors, Vol. 26, Pages 2805: AI Methods in Sensor Calibration</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2805</link>
	<description>Artificial intelligence (AI)-based methods are rapidly advancing sensor technology, bringing significant advances in structural design and optimization, fabrication, calibration, and application. The recent adoption of AI models has provided a new paradigm for sensor calibration and greatly improved the accuracy and stability of the obtained sensing characteristics. In this paper, we present an overview of recent advances in AI methods for sensor calibration. The advantages of leveraging AI models to derive the transfer function, compensate for ambient interference and drift, and enable large-scale, low-cost sensing are reviewed and discussed to illustrate the pioneering transformations in this domain. Supporting tools for data preprocessing, training optimization, and data augmentation are also covered. The significant achievements across various sensing systems demonstrate that AI methods can be a powerful solution to the critical issues in calibrating sensors. However, several critical challenges persist alongside these remarkable achievements, and long-term commitment remains essential for future investigations.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2805: AI Methods in Sensor Calibration</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2805">doi: 10.3390/s26092805</a></p>
	<p>Authors:
		Fei Kou
		Yu-Qing Liu
		Chen-Xi Li
		Hong-Bo Qin
		Yan Liu
		</p>
	<p>Artificial intelligence (AI)-based methods are rapidly advancing sensor technology, bringing significant advances in structural design and optimization, fabrication, calibration, and application. The recent adoption of AI models has provided a new paradigm for sensor calibration and greatly improved the accuracy and stability of the obtained sensing characteristics. In this paper, we present an overview of recent advances in AI methods for sensor calibration. The advantages of leveraging AI models to derive the transfer function, compensate for ambient interference and drift, and enable large-scale, low-cost sensing are reviewed and discussed to illustrate the pioneering transformations in this domain. Supporting tools for data preprocessing, training optimization, and data augmentation are also covered. The significant achievements across various sensing systems demonstrate that AI methods can be a powerful solution to the critical issues in calibrating sensors. However, several critical challenges persist alongside these remarkable achievements, and long-term commitment remains essential for future investigations.</p>
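	<p>A minimal Python sketch of one calibration pattern covered by the review: learn a transfer function from the raw sensor output plus an ambient covariate (here temperature) to the reference quantity, so the model absorbs both the nonlinearity and the drift. The synthetic data and the MLP choice are illustrative assumptions, not taken from any system in the paper.</p>
	<pre><code>
# Sketch only: synthetic data; a small MLP learns the calibration mapping.
import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.default_rng(0)
n = 500
raw = rng.uniform(0.0, 1.0, n)                 # raw sensor reading
temp = rng.uniform(15.0, 35.0, n)              # ambient temperature, deg C
truth = 3.0 * raw**2 + 0.5 * raw               # nonlinear transfer function
drift = 0.02 * (temp - 25.0)                   # temperature-induced drift
measured = truth + drift + rng.normal(0.0, 0.01, n)

# Inputs: what the deployed system sees (its own output plus temperature).
X = np.column_stack([measured, temp])
model = MLPRegressor(hidden_layer_sizes=(32, 32), max_iter=3000,
                     random_state=0).fit(X, truth)
pred = model.predict(X)
print("calibration RMSE:", float(np.sqrt(np.mean((pred - truth) ** 2))))
</code></pre>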
	]]></content:encoded>

	<dc:title>AI Methods in Sensor Calibration</dc:title>
			<dc:creator>Fei Kou</dc:creator>
			<dc:creator>Yu-Qing Liu</dc:creator>
			<dc:creator>Chen-Xi Li</dc:creator>
			<dc:creator>Hong-Bo Qin</dc:creator>
			<dc:creator>Yan Liu</dc:creator>
		<dc:identifier>doi: 10.3390/s26092805</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>2805</prism:startingPage>
		<prism:doi>10.3390/s26092805</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2805</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2804">

	<title>Sensors, Vol. 26, Pages 2804: Hardware-Agnostic Imitation Learning Method for Autonomous Ultrasound Scanning Addressing Physical Deployment Discrepancies</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2804</link>
	<description>To achieve autonomous ultrasound scanning skill transfer across different physical equipment instances and address the limitations of traditional imitation learning methods&mdash;which struggle with cross-instance generalization due to their reliance on specific manipulator parameters&mdash;this study proposes a physical-parameter-decoupled imitation learning method based on waypoint representation. This approach utilizes a greedy algorithm to automatically extract key nodes within the task space from expert demonstration trajectories, constructing a trajectory representation decoupled from low-level kinematic parameters and base calibration errors. Simultaneously, a velocity-aware adaptive error precision adjustment mechanism is introduced to dynamically modulate waypoint extraction thresholds, simulating the speed-accuracy strategies employed by sonographers across different scanning phases. Cross-validation across two mainstream generative architectures&mdash;Action Chunking Transformer (ACT) and Diffusion Policy&mdash;on an offline dataset confirms the plug-and-play capability of waypoint representation in suppressing long-horizon error accumulation, with both architectures achieving significant reductions in prediction errors. For physical deployment, a complete ACT-waypoint system featuring low-level triple safety redundancy was validated. In kidney long-axis standard plane scanning tasks, the system achieved a 92% success rate on the source domain manipulator and maintained an 84% success rate on the target deployment manipulator, despite incompatible low-level kinematic parameters and base coordinates. Force control accuracy remained stable around the target value of 12 N. The results demonstrate that the proposed method effectively overcomes base coordinate and D-H parameter discrepancies to achieve cross-instance zero-shot skill transfer, significantly enhancing the adaptability across physical instances and the scanning success rate of imitation learning models.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2804: Hardware-Agnostic Imitation Learning Method for Autonomous Ultrasound Scanning Addressing Physical Deployment Discrepancies</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2804">doi: 10.3390/s26092804</a></p>
	<p>Authors:
		 Ma
		 Xia
		 Gao
		 Zhu
		 Tang
		</p>
	<p>To achieve autonomous ultrasound scanning skill transfer across different physical equipment instances and address the limitations of traditional imitation learning methods&mdash;which struggle with cross-instance generalization due to their reliance on specific manipulator parameters&mdash;this study proposes a physical-parameter-decoupled imitation learning method based on waypoint representation. This approach utilizes a greedy algorithm to automatically extract key nodes within the task space from expert demonstration trajectories, constructing a trajectory representation decoupled from low-level kinematic parameters and base calibration errors. Simultaneously, a velocity-aware adaptive error precision adjustment mechanism is introduced to dynamically modulate waypoint extraction thresholds, simulating the speed-accuracy strategies employed by sonographers across different scanning phases. Cross-validation across two mainstream generative architectures&mdash;Action Chunking Transformer (ACT) and Diffusion Policy&mdash;on an offline dataset confirms the plug-and-play capability of waypoint representation in suppressing long-horizon error accumulation, with both architectures achieving significant reductions in prediction errors. For physical deployment, a complete ACT-waypoint system featuring low-level triple safety redundancy was validated. In kidney long-axis standard plane scanning tasks, the system achieved a 92% success rate on the source domain manipulator and maintained an 84% success rate on the target deployment manipulator, despite incompatible low-level kinematic parameters and base coordinates. Force control accuracy remained stable around the target value of 12 N. The results demonstrate that the proposed method effectively overcomes base coordinate and D-H parameter discrepancies to achieve cross-instance zero-shot skill transfer, significantly enhancing the adaptability across physical instances and the scanning success rate of imitation learning models.</p>
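	<p>A minimal Python sketch of the waypoint idea described above: greedily keep the fewest trajectory points such that linear interpolation between kept points stays within an error budget, tightening the budget in slow (typically precise) phases as a stand-in for the velocity-aware threshold. The toy trajectory and thresholds are illustrative assumptions, not the paper's algorithm verbatim.</p>
	<pre><code>
# Sketch only: toy trajectory; budget and slow-speed cutoff are assumed.
import numpy as np

def extract_waypoints(traj, base_eps=0.01, slow_speed=0.05):
    """traj: (T, D) task-space positions sampled at a fixed rate."""
    speeds = np.linalg.norm(np.diff(traj, axis=0), axis=1)
    kept, anchor = [0], 0
    for t in range(1, len(traj)):
        # tighter tolerance in slow (typically precise) scanning phases
        eps = base_eps * (0.25 if slow_speed >= speeds[t - 1] else 1.0)
        alpha = np.linspace(0.0, 1.0, t - anchor + 1)[:, None]
        interp = (1 - alpha) * traj[anchor] + alpha * traj[t]
        err = np.abs(interp - traj[anchor:t + 1]).max()
        if err > eps:
            kept.append(t - 1)   # last point that still met the budget
            anchor = t - 1
    kept.append(len(traj) - 1)
    return np.array(sorted(set(kept)))

s = np.linspace(0.0, 1.0, 200)[:, None]
traj = np.hstack([s, np.sin(3.0 * s)])           # toy 2D demonstration
print("kept", len(extract_waypoints(traj)), "of", len(traj), "points")
</code></pre>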
	]]></content:encoded>

	<dc:title>Hardware-Agnostic Imitation Learning Method for Autonomous Ultrasound Scanning Addressing Physical Deployment Discrepancies</dc:title>
			<dc:creator> Ma</dc:creator>
			<dc:creator> Xia</dc:creator>
			<dc:creator> Gao</dc:creator>
			<dc:creator> Zhu</dc:creator>
			<dc:creator> Tang</dc:creator>
		<dc:identifier>doi: 10.3390/s26092804</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2804</prism:startingPage>
		<prism:doi>10.3390/s26092804</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2804</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2803">

	<title>Sensors, Vol. 26, Pages 2803: An Alternative Current Device to Simplify Leakage Detection in Complex DC Systems</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2803</link>
	<description>An alternative, low-cost current device for leakage detection is presented in this work. Its main advantages, besides cost and portability, are high efficiency and ease of operation, enabling simple and effective deployment in energized electrical power systems. It is primarily intended for leakage detection in direct-current auxiliary systems (DCAS), whose reliable and continuous operation is essential to guarantee safety and robustness across a large variety of assets, such as large power plants, substations, and industrial facilities. This effectiveness, along with a proof of concept, is demonstrated through tests and real maintenance situations presented in the final sections.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2803: An Alternative Current Device to Simplify Leakage Detection in Complex DC Systems</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2803">doi: 10.3390/s26092803</a></p>
	<p>Authors:
		Brunalice de Matos Mercer
		Rodrigo Antonio Sbardeloto Kraemer
		Luis Otavio Steffenmunsberg Grillo
		Durval da Silva Neto
		Henrique Monteiro Basso
		Mauricio Ibarra Dobes
		Marcos Damont Terra
		</p>
	<p>An alternative, low-cost current device for leakage detection is presented in this work. Its main advantages, besides cost and portability, are high efficiency and ease of operation, enabling simple and effective deployment in energized electrical power systems. It is primarily intended for leakage detection in direct-current auxiliary systems (DCAS), whose reliable and continuous operation is essential to guarantee safety and robustness across a large variety of assets, such as large power plants, substations, and industrial facilities. This effectiveness, along with a proof of concept, is demonstrated through tests and real maintenance situations presented in the final sections.</p>
	]]></content:encoded>

	<dc:title>An Alternative Current Device to Simplify Leakage Detection in Complex DC Systems</dc:title>
			<dc:creator>Brunalice de Matos Mercer</dc:creator>
			<dc:creator>Rodrigo Antonio Sbardeloto Kraemer</dc:creator>
			<dc:creator>Luis Otavio Steffenmunsberg Grillo</dc:creator>
			<dc:creator>Durval da Silva Neto</dc:creator>
			<dc:creator>Henrique Monteiro Basso</dc:creator>
			<dc:creator>Mauricio Ibarra Dobes</dc:creator>
			<dc:creator>Marcos Damont Terra</dc:creator>
		<dc:identifier>doi: 10.3390/s26092803</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2803</prism:startingPage>
		<prism:doi>10.3390/s26092803</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2803</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2801">

	<title>Sensors, Vol. 26, Pages 2801: CUCT-Net: End-to-End Signal-to-Image Learning for Quantized Speed-of-Sound Estimation and Tissue Segmentation in Ultrasound Computed Tomography</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2801</link>
	<description>Objective: Traditional Full Waveform Inversion (FWI) methods for Ultrasound Computed Tomography (UCT) are computationally expensive and can be sensitive to strong acoustic contrasts. In this work, we propose the Multi-Channel Transducer Network (CUCT-Net), a deep learning framework that directly maps received ultrasound signals to image-space outputs for quantized speed-of-sound (SoS) estimation and for direct tissue-level segmentation over both low- and high-contrast regions, enabling end-to-end recovery of both contrast-driven and anatomically meaningful structures from raw measurements. Method: CUCT-Net uses a multi-input encoder&ndash;decoder architecture that maps raw multi-static UCT measurements to quantized SoS (or tissue-class) maps without requiring an initial guess or iterative optimization. Parallel per-transducer encoders extract view-specific features that are fused and refined by a decoder, with Shift Units (SU) used to enhance fine-scale feature modeling under sparse sensing. Experiments are performed on k-Wave simulations using (i) Shepp&ndash;Logan-inspired disc phantoms with Original/Distorted/Mixed variants and (ii) DBB-derived anatomical brain phantoms, under clean and noisy measurement conditions. Results: The proposed network achieves accurate quantized SoS estimation and direct tissue-level segmentation across synthetic and anatomically derived phantom experiments. Strong robustness to noise is demonstrated through transfer learning. Compared with FWI, CUCT-Net significantly reduces computational cost while maintaining stable performance under reduced-sensor conditions for quantized SoS estimation and complex tissue heterogeneity for segmentation. Conclusions: CUCT-Net formulates UCT as a direct signal-to-image learning problem that supports both quantized SoS estimation and tissue-level segmentation. By learning an end-to-end mapping from raw ultrasound measurements to quantized SoS or tissue representations, the proposed framework bypasses iterative inversion and achieves efficient and robust performance under reduced-sensor and strong-contrast conditions. The multi-input architecture enables effective integration of information from multiple transducers, demonstrating the feasibility and potential of data-driven end-to-end quantized SoS estimation and tissue segmentation for UCT.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2801: CUCT-Net: End-to-End Signal-to-Image Learning for Quantized Speed-of-Sound Estimation and Tissue Segmentation in Ultrasound Computed Tomography</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2801">doi: 10.3390/s26092801</a></p>
	<p>Authors:
		Qinhan Gao
		Mohamed Khaled Almekkawy
		</p>
	<p>Objective: Traditional Full Waveform Inversion (FWI) methods for Ultrasound Computed Tomography (UCT) are computationally expensive and can be sensitive to strong acoustic contrasts. In this work, we propose the Multi-Channel Transducer Network (CUCT-Net), a deep learning framework that directly maps received ultrasound signals to image-space outputs for quantized speed-of-sound (SoS) estimation and for direct tissue-level segmentation over both low- and high-contrast regions, enabling end-to-end recovery of both contrast-driven and anatomically meaningful structures from raw measurements. Method: CUCT-Net uses a multi-input encoder&ndash;decoder architecture that maps raw multi-static UCT measurements to quantized SoS (or tissue-class) maps without requiring an initial guess or iterative optimization. Parallel per-transducer encoders extract view-specific features that are fused and refined by a decoder, with Shift Units (SU) used to enhance fine-scale feature modeling under sparse sensing. Experiments are performed on k-Wave simulations using (i) Shepp&ndash;Logan-inspired disc phantoms with Original/Distorted/Mixed variants and (ii) DBB-derived anatomical brain phantoms, under clean and noisy measurement conditions. Results: The proposed network achieves accurate quantized SoS estimation and direct tissue-level segmentation across synthetic and anatomically derived phantom experiments. Strong robustness to noise is demonstrated through transfer learning. Compared with FWI, CUCT-Net significantly reduces computational cost while maintaining stable performance under reduced-sensor conditions for quantized SoS estimation and complex tissue heterogeneity for segmentation. Conclusions: CUCT-Net formulates UCT as a direct signal-to-image learning problem that supports both quantized SoS estimation and tissue-level segmentation. By learning an end-to-end mapping from raw ultrasound measurements to quantized SoS or tissue representations, the proposed framework bypasses iterative inversion and achieves efficient and robust performance under reduced-sensor and strong-contrast conditions. The multi-input architecture enables effective integration of information from multiple transducers, demonstrating the feasibility and potential of data-driven end-to-end quantized SoS estimation and tissue segmentation for UCT.</p>
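	<p>A minimal Python sketch of the multi-input layout described above: one encoder per transducer view, feature fusion across views, then a decoder to an image-space class map. All layers are toy linear maps; the real network's convolutional encoders, Shift Units, and training procedure are not reproduced here.</p>
	<pre><code>
# Sketch only: toy linear "encoders" and "decoder"; all shapes are assumed.
import numpy as np

rng = np.random.default_rng(0)
N_VIEWS, SIG_LEN, FEAT, H, W, N_CLASSES = 8, 256, 64, 16, 16, 4

encoders = [rng.standard_normal((FEAT, SIG_LEN)) * 0.05
            for _ in range(N_VIEWS)]             # one encoder per transducer
decoder = rng.standard_normal((H * W * N_CLASSES, FEAT)) * 0.05

def forward(signals):
    """signals: (N_VIEWS, SIG_LEN) raw multi-static measurements."""
    feats = [np.maximum(E @ s, 0.0) for E, s in zip(encoders, signals)]
    fused = np.mean(feats, axis=0)               # fuse view-specific features
    logits = (decoder @ fused).reshape(H, W, N_CLASSES)
    return logits.argmax(axis=-1)                # quantized SoS / tissue map

signals = rng.standard_normal((N_VIEWS, SIG_LEN))
print(forward(signals).shape)                    # (16, 16) class map
</code></pre>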
	]]></content:encoded>

	<dc:title>CUCT-Net: End-to-End Signal-to-Image Learning for Quantized Speed-of-Sound Estimation and Tissue Segmentation in Ultrasound Computed Tomography</dc:title>
			<dc:creator>Qinhan Gao</dc:creator>
			<dc:creator>Mohamed Khaled Almekkawy</dc:creator>
		<dc:identifier>doi: 10.3390/s26092801</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2801</prism:startingPage>
		<prism:doi>10.3390/s26092801</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2801</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1424-8220/26/9/2798">

	<title>Sensors, Vol. 26, Pages 2798: A New Type of Ultrasonic Gyroscopic Sensor Based on a Solid-State Standing-Wave Vibrator: Towards Shock-Resistant Design</title>
	<link>https://www.mdpi.com/1424-8220/26/9/2798</link>
	<description>This paper presents a new type of ultrasonic gyroscopic sensor based on a solid-state standing-wave vibrator, which is promising for shock-resistant applications. A theoretical model of the proposed layered design and a numerical simulation of its frequency response using the developed software are presented. A test sample of the novel sensing element was fabricated, and experimental studies of its frequency response were conducted. The results showed close agreement between the resonant frequencies obtained from the real sample and from numerical modeling, confirming the validity of the theoretical model. A laboratory investigation of the developed sensing element on a rotating test bench revealed a shift in the standing-wave amplitude proportional to the angular velocity of rotation, providing an informative signal for this type of gyroscopic sensor. The amplitude of the output signal of the new standing-wave sensor compares favorably with the signal levels reported for similar traveling-wave solutions in previous studies. Optimization strategies for the new sensor&rsquo;s design and operating mode to increase the signal-to-noise ratio are also identified. These results support the potential of the developed solid-state standing-wave vibrator as a shock-resistant ultrasonic gyroscopic sensor.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Sensors, Vol. 26, Pages 2798: A New Type of Ultrasonic Gyroscopic Sensor Based on a Solid-State Standing-Wave Vibrator: Towards Shock-Resistant Design</b></p>
	<p>Sensors <a href="https://www.mdpi.com/1424-8220/26/9/2798">doi: 10.3390/s26092798</a></p>
	<p>Authors:
		Michail Shevelko
		Andrey Baranov
		Ekaterina Popkova
		Yasemin Staroverova
		Alexander Kukaev
		Sergey Shevchenko
		</p>
	<p>This paper presents a new type of ultrasonic gyroscopic sensor based on a solid-state standing-wave vibrator, which is promising for shock-resistant applications. A theoretical model of the proposed layered design and a numerical simulation of its frequency response using the developed software are presented. A test sample of the novel sensing element was fabricated, and experimental studies of its frequency response were conducted. The results showed close agreement between the resonant frequencies obtained from the real sample and from numerical modeling, confirming the validity of the theoretical model. A laboratory investigation of the developed sensing element on a rotating test bench revealed a shift in the standing-wave amplitude proportional to the angular velocity of rotation, providing an informative signal for this type of gyroscopic sensor. The amplitude of the output signal of the new standing-wave sensor compares favorably with the signal levels reported for similar traveling-wave solutions in previous studies. Optimization strategies for the new sensor&rsquo;s design and operating mode to increase the signal-to-noise ratio are also identified. These results support the potential of the developed solid-state standing-wave vibrator as a shock-resistant ultrasonic gyroscopic sensor.</p>
	]]></content:encoded>

	<dc:title>A New Type of Ultrasonic Gyroscopic Sensor Based on a Solid-State Standing-Wave Vibrator: Towards Shock-Resistant Design</dc:title>
			<dc:creator>Michail Shevelko</dc:creator>
			<dc:creator>Andrey Baranov</dc:creator>
			<dc:creator>Ekaterina Popkova</dc:creator>
			<dc:creator>Yasemin Staroverova</dc:creator>
			<dc:creator>Alexander Kukaev</dc:creator>
			<dc:creator>Sergey Shevchenko</dc:creator>
		<dc:identifier>doi: 10.3390/s26092798</dc:identifier>
	<dc:source>Sensors</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Sensors</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>26</prism:volume>
	<prism:number>9</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2798</prism:startingPage>
		<prism:doi>10.3390/s26092798</prism:doi>
	<prism:url>https://www.mdpi.com/1424-8220/26/9/2798</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
