<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/IoT">
		<title>IoT</title>
		<description>Latest open access articles published in IoT at https://www.mdpi.com/journal/IoT</description>
		<link>https://www.mdpi.com/journal/IoT</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/IoT"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021?1778678334"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/2/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/7/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/78" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/77" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/76" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/75" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/74" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/73" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/72" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/71" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/70" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/69" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/68" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/67" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/66" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/65" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/64" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/63" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/62" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/61" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/60" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/59" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/58" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/4/57" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/56" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/55" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/54" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/53" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/3/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2624-831X/6/2/21" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/42">

	<title>IoT, Vol. 7, Pages 42: IoT-Based System for Real-Time Water Quality Monitoring and Advanced Turbidity and pH Sensor Calibration to Improve Accuracy and Reliability Using ThingSpeak</title>
	<link>https://www.mdpi.com/2624-831X/7/2/42</link>
	<description>Water quality has become a major concern for public health, agriculture, and industry, necessitating reliable and continuous monitoring. Conventional monitoring methods are often time-consuming, rely on manual sampling, and involve complex equipment or procedures, making them unsuitable for real-time applications. This study presents an Internet of Things (IoT)-based system for real-time water quality monitoring using ESP32 hardware integrated with the ThingSpeak platform. The system enhances the accuracy of turbidity and pH measurements using advanced sensor calibration techniques. Nephelometric methods and glass electrodes are employed for turbidity detection and pH sensing, respectively, across various water types&amp;mdash;including tap water, groundwater, wastewater, saline water, and treated water&amp;mdash;to address issues such as environmental drift and measurement inaccuracies. The turbidity sensor was calibrated using a standard six-point method with formazin solutions (0&amp;ndash;1064 NTU), whereas pH calibration utilized a three-point approach with NIST-traceable buffer solutions (pH 4, 7, and 10). The results indicate that turbidity measurement errors, initially ranging from 15.75% to 422%, were reduced to below 10% after calibration. Similarly, pH accuracy was significantly improved across all tested water matrices. The system enables real-time data visualization via ThingSpeak, and the implementation of multi-point calibration ensures high data reliability for continuous monitoring. Overall, this approach offers an accurate, efficient, and practical solution for real-time water quality management.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 42: IoT-Based System for Real-Time Water Quality Monitoring and Advanced Turbidity and pH Sensor Calibration to Improve Accuracy and Reliability Using ThingSpeak</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/42">doi: 10.3390/iot7020042</a></p>
	<p>Authors:
		Mulhim Al Drees
		Abbas E. Rahma
		Samah Daffalla
		Rawabi Alsudais
		Naser Fathi Alsubaie
		Mohammed Albrahim
		Hassan Abdullah Alghanim
		Mustafa I. Almaghasla
		</p>
	<p>Water quality has become a major concern for public health, agriculture, and industry, necessitating reliable and continuous monitoring. Conventional monitoring methods are often time-consuming, rely on manual sampling, and involve complex equipment or procedures, making them unsuitable for real-time applications. This study presents an Internet of Things (IoT)-based system for real-time water quality monitoring using ESP32 hardware integrated with the ThingSpeak platform. The system enhances the accuracy of turbidity and pH measurements using advanced sensor calibration techniques. Nephelometric methods and glass electrodes are employed for turbidity detection and pH sensing, respectively, across various water types&mdash;including tap water, groundwater, wastewater, saline water, and treated water&mdash;to address issues such as environmental drift and measurement inaccuracies. The turbidity sensor was calibrated using a standard six-point method with formazin solutions (0&ndash;1064 NTU), whereas pH calibration utilized a three-point approach with NIST-traceable buffer solutions (pH 4, 7, and 10). The results indicate that turbidity measurement errors, initially ranging from 15.75% to 422%, were reduced to below 10% after calibration. Similarly, pH accuracy was significantly improved across all tested water matrices. The system enables real-time data visualization via ThingSpeak, and the implementation of multi-point calibration ensures high data reliability for continuous monitoring. Overall, this approach offers an accurate, efficient, and practical solution for real-time water quality management.</p>
	]]></content:encoded>

	<dc:title>IoT-Based System for Real-Time Water Quality Monitoring and Advanced Turbidity and pH Sensor Calibration to Improve Accuracy and Reliability Using ThingSpeak</dc:title>
			<dc:creator>Mulhim Al Drees</dc:creator>
			<dc:creator>Abbas E. Rahma</dc:creator>
			<dc:creator>Samah Daffalla</dc:creator>
			<dc:creator>Rawabi Alsudais</dc:creator>
			<dc:creator>Naser Fathi Alsubaie</dc:creator>
			<dc:creator>Mohammed Albrahim</dc:creator>
			<dc:creator>Hassan Abdullah Alghanim</dc:creator>
			<dc:creator>Mustafa I. Almaghasla</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020042</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/iot7020042</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/41">

	<title>IoT, Vol. 7, Pages 41: Hybrid Deep Architectures in Contrastive Latent Space: Performance Analysis of VAE-MLP, VAE-MoTE, and VAE-GAT for IoT Botnet Detection</title>
	<link>https://www.mdpi.com/2624-831X/7/2/41</link>
	<description>The rapid proliferation of Internet of Things (IoT) devices has significantly expanded the attack surface of modern networks leading to a surge in IoT-based botnet attacks. Detecting such attacks remains challenging due to the high dimensionality and heterogeneity of IoT network traffic. This study proposes and evaluates three hybrid deep learning architectures for IoT botnet detection that combine representation learning with supervised classification: VAE-encoder-MLP, VAE-encoder-GAT, and VAE-encoder-MoTE. A Variational Autoencoder is initially trained to learn a compact latent representation of the high-dimensional traffic features. Subsequently, the pretrained VAE-encoder component is employed to project the data into a lower-dimensional embedding space. These embeddings are then used to train three different downstream classifiers: a multilayer perceptron (MLP), a graph attention network (GAT), and a mixture of tiny experts (MoTE) model. To further enhance representation discriminability, supervised contrastive learning is incorporated to encourage intra-class compactness and inter-class separability. The proposed architectures are evaluated on two widely studied benchmark datasets&amp;mdash;the CICIoT2022 and N-BaIoT dataset&amp;mdash;under both binary and multiclass classification settings. Experimental results demonstrate that all three models achieve near-perfect performance in binary attack detection, with accuracy exceeding 99.8%. In the more challenging multiclass scenario, the VAE-encoder-MLP model achieves the best overall performance, reaching accuracies of 98.55% on CICIoT2022 and 99.75% on N-BaIoT. These findings provide insights into the design of efficient and scalable deep learning architectures for IoT intrusion detection.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 41: Hybrid Deep Architectures in Contrastive Latent Space: Performance Analysis of VAE-MLP, VAE-MoTE, and VAE-GAT for IoT Botnet Detection</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/41">doi: 10.3390/iot7020041</a></p>
	<p>Authors:
		Hassan Wasswa
		Timothy Lynar
		</p>
	<p>The rapid proliferation of Internet of Things (IoT) devices has significantly expanded the attack surface of modern networks leading to a surge in IoT-based botnet attacks. Detecting such attacks remains challenging due to the high dimensionality and heterogeneity of IoT network traffic. This study proposes and evaluates three hybrid deep learning architectures for IoT botnet detection that combine representation learning with supervised classification: VAE-encoder-MLP, VAE-encoder-GAT, and VAE-encoder-MoTE. A Variational Autoencoder is initially trained to learn a compact latent representation of the high-dimensional traffic features. Subsequently, the pretrained VAE-encoder component is employed to project the data into a lower-dimensional embedding space. These embeddings are then used to train three different downstream classifiers: a multilayer perceptron (MLP), a graph attention network (GAT), and a mixture of tiny experts (MoTE) model. To further enhance representation discriminability, supervised contrastive learning is incorporated to encourage intra-class compactness and inter-class separability. The proposed architectures are evaluated on two widely studied benchmark datasets&mdash;the CICIoT2022 and N-BaIoT dataset&mdash;under both binary and multiclass classification settings. Experimental results demonstrate that all three models achieve near-perfect performance in binary attack detection, with accuracy exceeding 99.8%. In the more challenging multiclass scenario, the VAE-encoder-MLP model achieves the best overall performance, reaching accuracies of 98.55% on CICIoT2022 and 99.75% on N-BaIoT. These findings provide insights into the design of efficient and scalable deep learning architectures for IoT intrusion detection.</p>
	]]></content:encoded>

	<dc:title>Hybrid Deep Architectures in Contrastive Latent Space: Performance Analysis of VAE-MLP, VAE-MoTE, and VAE-GAT for IoT Botnet Detection</dc:title>
			<dc:creator>Hassan Wasswa</dc:creator>
			<dc:creator>Timothy Lynar</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020041</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/iot7020041</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/40">

	<title>IoT, Vol. 7, Pages 40: An IoT-Aware Certificateless Signature Scheme for Protection Against Type-I and Type-II Super Adversaries</title>
	<link>https://www.mdpi.com/2624-831X/7/2/40</link>
	<description>Internet of Things (IoT) assists in efficient connectivity and automation of various applications by making use of wireless communication technology. Ensuring secure authentication and data integrity are the main challenges in this open wireless platform. Although existing cryptographic methods can address these security challenges, most of them incur additional computational and communication overhead, which is unsuitable for resource-constrained IoT devices. Nowadays, researchers have focused on proposing efficient schemes to satisfy security requirements in open wireless IoT frameworks. Recently, a Certificateless Signature (CLS) scheme was developed for the IoT environment. However, in this paper, we show that this CLS scheme is vulnerable to attacks by super Type-II adversaries. To strengthen this scheme, we propose a novel and efficient CLS scheme with existential unforgeability against super adversaries in the Random Oracle Model (ROM). The proposed CLS scheme achieves reduced computational complexity and communication cost. As such, it is suitable for wireless IoT networks to provide secure message authentication and data integrity.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 40: An IoT-Aware Certificateless Signature Scheme for Protection Against Type-I and Type-II Super Adversaries</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/40">doi: 10.3390/iot7020040</a></p>
	<p>Authors:
		Parichehr Dadkhah
		Parvin Rastegari
		Mohammad Dakhilalian
		Phil Yeoh
		Mingzhong Wang
		Shahrzad Saremi
		Rania Shibl
		</p>
	<p>Internet of Things (IoT) assists in efficient connectivity and automation of various applications by making use of wireless communication technology. Ensuring secure authentication and data integrity are the main challenges in this open wireless platform. Although existing cryptographic methods can address these security challenges, most of them incur additional computational and communication overhead, which is unsuitable for resource-constrained IoT devices. Nowadays, researchers have focused on proposing efficient schemes to satisfy security requirements in open wireless IoT frameworks. Recently, a Certificateless Signature (CLS) scheme was developed for the IoT environment. However, in this paper, we show that this CLS scheme is vulnerable to attacks by super Type-II adversaries. To strengthen this scheme, we propose a novel and efficient CLS scheme with existential unforgeability against super adversaries in the Random Oracle Model (ROM). The proposed CLS scheme achieves reduced computational complexity and communication cost. As such, it is suitable for wireless IoT networks to provide secure message authentication and data integrity.</p>
	]]></content:encoded>

	<dc:title>An IoT-Aware Certificateless Signature Scheme for Protection Against Type-I and Type-II Super Adversaries</dc:title>
			<dc:creator>Parichehr Dadkhah</dc:creator>
			<dc:creator>Parvin Rastegari</dc:creator>
			<dc:creator>Mohammad Dakhilalian</dc:creator>
			<dc:creator>Phil Yeoh</dc:creator>
			<dc:creator>Mingzhong Wang</dc:creator>
			<dc:creator>Shahrzad Saremi</dc:creator>
			<dc:creator>Rania Shibl</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020040</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/iot7020040</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/39">

	<title>IoT, Vol. 7, Pages 39: Assessing Internet of Things Readiness on University Campuses: A Smart Campus-Oriented Approach</title>
	<link>https://www.mdpi.com/2624-831X/7/2/39</link>
	<description>The Internet of Things (IoT) is increasingly recognized as a core digital infrastructure supporting digital transformation, particularly in complex environments such as university campuses, which can be conceptualized as smart campus ecosystems. However, many organizations encounter difficulties when implementing IoT due to insufficient organizational and technological readiness. This paper presents the University Campus IoT (UCIoT) readiness assessment model, which conceptualizes IoT readiness as a manifestation of organizational digital transformation readiness within the smart campus context. The model consists of 24 dimensions grouped into organizational and technological categories and is implemented through structured questionnaires and a supporting software tool. The model was developed using the design science research methodology and evaluated through a case study conducted at the University Campus of Novi Sad, Serbia. The results demonstrate that the model provides a structured and realistic assessment of IoT readiness and helps identify organizational and technological bottlenecks relevant to IoT implementation. The main contribution of this research is a context-specific readiness assessment framework tailored to university campuses that integrates organizational, technological, and client readiness dimensions.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 39: Assessing Internet of Things Readiness on University Campuses: A Smart Campus-Oriented Approach</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/39">doi: 10.3390/iot7020039</a></p>
	<p>Authors:
		Dejan Arsenijević
		Jasmina Arsenijević
		Srđan Tegeltija
		Xiaoshuan Zhang
		Gordana Ostojić
		Stevan Stankovski
		</p>
	<p>The Internet of Things (IoT) is increasingly recognized as a core digital infrastructure supporting digital transformation, particularly in complex environments such as university campuses, which can be conceptualized as smart campus ecosystems. However, many organizations encounter difficulties when implementing IoT due to insufficient organizational and technological readiness. This paper presents the University Campus IoT (UCIoT) readiness assessment model, which conceptualizes IoT readiness as a manifestation of organizational digital transformation readiness within the smart campus context. The model consists of 24 dimensions grouped into organizational and technological categories and is implemented through structured questionnaires and a supporting software tool. The model was developed using the design science research methodology and evaluated through a case study conducted at the University Campus of Novi Sad, Serbia. The results demonstrate that the model provides a structured and realistic assessment of IoT readiness and helps identify organizational and technological bottlenecks relevant to IoT implementation. The main contribution of this research is a context-specific readiness assessment framework tailored to university campuses that integrates organizational, technological, and client readiness dimensions.</p>
	]]></content:encoded>

	<dc:title>Assessing Internet of Things Readiness on University Campuses: A Smart Campus-Oriented Approach</dc:title>
			<dc:creator>Dejan Arsenijević</dc:creator>
			<dc:creator>Jasmina Arsenijević</dc:creator>
			<dc:creator>Srđan Tegeltija</dc:creator>
			<dc:creator>Xiaoshuan Zhang</dc:creator>
			<dc:creator>Gordana Ostojić</dc:creator>
			<dc:creator>Stevan Stankovski</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020039</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/iot7020039</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/38">

	<title>IoT, Vol. 7, Pages 38: HILANDER: High-Performance Intelligent Learning-Based Task Offloading for Network-Aware Dynamic Edge Resource Allocation</title>
	<link>https://www.mdpi.com/2624-831X/7/2/38</link>
	<description>Edge computing has emerged as a promising paradigm to minimize latency and energy consumption while improving computational efficiency for mobile devices. Latency-sensitive applications such as autonomous driving, augmented reality, and industrial automation require ultra-low response times, making efficient task offloading a necessity in edge computing. However, distributing optimally computational tasks among edge servers remains a challenge, especially when considering latency, energy consumption, and workload balancing simultaneously. Although existing approaches have focused on one or two of these objectives, they do not provide a holistic solution that incorporates all three factors. In addition, some existing solutions do not take advantage of parallelism at the edge layer, resulting in bottlenecks and inefficient resource usage. In this paper, we propose a novel learning-based task offloading model that integrates parallel processing at the edge layer, adaptive workload balancing, and joint latency&amp;ndash;energy optimization. Moreover, by dynamically adjusting the number of selected edge servers for parallel execution, our approach achieves optimal trade-offs between performance and resource efficiency. Our experimental setup includes several edge servers and several randomly deployed devices. It employs Apache HTTP Benchmark (AB) to generate realistic Mobile Edge Computing workloads. The obtained results show that our method outperforms existing approaches by reducing latency, lowering energy consumption, and maintaining a balanced workload across edge nodes.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 38: HILANDER: High-Performance Intelligent Learning-Based Task Offloading for Network-Aware Dynamic Edge Resource Allocation</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/38">doi: 10.3390/iot7020038</a></p>
	<p>Authors:
		Garrik Brel Jagho Mdemaya
		Armel Nkonjoh Ngomade
		Mthulisi Velempini
		</p>
	<p>Edge computing has emerged as a promising paradigm to minimize latency and energy consumption while improving computational efficiency for mobile devices. Latency-sensitive applications such as autonomous driving, augmented reality, and industrial automation require ultra-low response times, making efficient task offloading a necessity in edge computing. However, distributing optimally computational tasks among edge servers remains a challenge, especially when considering latency, energy consumption, and workload balancing simultaneously. Although existing approaches have focused on one or two of these objectives, they do not provide a holistic solution that incorporates all three factors. In addition, some existing solutions do not take advantage of parallelism at the edge layer, resulting in bottlenecks and inefficient resource usage. In this paper, we propose a novel learning-based task offloading model that integrates parallel processing at the edge layer, adaptive workload balancing, and joint latency&ndash;energy optimization. Moreover, by dynamically adjusting the number of selected edge servers for parallel execution, our approach achieves optimal trade-offs between performance and resource efficiency. Our experimental setup includes several edge servers and several randomly deployed devices. It employs Apache HTTP Benchmark (AB) to generate realistic Mobile Edge Computing workloads. The obtained results show that our method outperforms existing approaches by reducing latency, lowering energy consumption, and maintaining a balanced workload across edge nodes.</p>
	]]></content:encoded>

	<dc:title>HILANDER: High-Performance Intelligent Learning-Based Task Offloading for Network-Aware Dynamic Edge Resource Allocation</dc:title>
			<dc:creator>Garrik Brel Jagho Mdemaya</dc:creator>
			<dc:creator>Armel Nkonjoh Ngomade</dc:creator>
			<dc:creator>Mthulisi Velempini</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020038</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/iot7020038</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/37">

	<title>IoT, Vol. 7, Pages 37: Distance-Aware Attenuation Modeling of a Helmet-Mounted Edge Thermal System Using MLX90640 and Raspberry Pi 5 for Industrial Safety Applications: Linear Regression Approach</title>
	<link>https://www.mdpi.com/2624-831X/7/2/37</link>
	<description>Thermal hazards in industrial environments often remain undetected until critical failure or injury occurs. Conventional handheld infrared cameras require manual operation and limit continuous situational awareness. This study presents the design and field validation of a wearable helmet-mounted real-time thermal system based on the MLX90640 infrared array sensor and a Raspberry Pi 5 edge computing platform. Experimental validation was performed across multiple scenarios of 400 measurements based on industrial distances of 100 cm and 150 cm. The performance of the system was tested against a pre-calibrated hotspot infrared thermometer using linear regression analysis and standard error metrics to determine proportional agreement. The results indicate a strong proportional relationship between the two systems at both industrial distances, with R2 values ranging from 0.9885 to 0.9973 at 100 cm and from 0.9586 to 0.9867 at 150 cm. A moderate increase in mean absolute error (MAE) was observed as the measurement distance increased. Statistically significant increases in measurement error were identified in mechanically dynamic scenarios (p-value &lt; 0.05), indicating distance-dependent sensitivity under moving mechanical conditions. The higher absolute errors at longer distances mainly result from field-of-view expansion, reduced target occupancy, and mixed-pixel hotspot effects rather than weakened proportional trend stability. An industrial distance-aware linear regression model was developed to describe behavior and support calibrations under different deployment conditions. Despite minor absolute deviations during dynamic operations, the system maintained strong trend-tracking performance, suggesting suitability for daily preliminary hazard monitoring in industrial safety maintenance.</description>
	<pubDate>2026-04-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 37: Distance-Aware Attenuation Modeling of a Helmet-Mounted Edge Thermal System Using MLX90640 and Raspberry Pi 5 for Industrial Safety Applications: Linear Regression Approach</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/37">doi: 10.3390/iot7020037</a></p>
	<p>Authors:
		Songwut Boonsong
		Paniti Netinant
		Rerkchai Fooprateepsiri
		Meennapa Rukhiran
		Manasanan Bunpalwong
		</p>
	<p>Thermal hazards in industrial environments often remain undetected until critical failure or injury occurs. Conventional handheld infrared cameras require manual operation and limit continuous situational awareness. This study presents the design and field validation of a wearable helmet-mounted real-time thermal system based on the MLX90640 infrared array sensor and a Raspberry Pi 5 edge computing platform. Experimental validation was performed across multiple scenarios of 400 measurements based on industrial distances of 100 cm and 150 cm. The performance of the system was tested against a pre-calibrated hotspot infrared thermometer using linear regression analysis and standard error metrics to determine proportional agreement. The results indicate a strong proportional relationship between the two systems at both industrial distances, with R2 values ranging from 0.9885 to 0.9973 at 100 cm and from 0.9586 to 0.9867 at 150 cm. A moderate increase in mean absolute error (MAE) was observed as the measurement distance increased. Statistically significant increases in measurement error were identified in mechanically dynamic scenarios (p-value &lt; 0.05), indicating distance-dependent sensitivity under moving mechanical conditions. The higher absolute errors at longer distances mainly result from field-of-view expansion, reduced target occupancy, and mixed-pixel hotspot effects rather than weakened proportional trend stability. An industrial distance-aware linear regression model was developed to describe behavior and support calibrations under different deployment conditions. Despite minor absolute deviations during dynamic operations, the system maintained strong trend-tracking performance, suggesting suitability for daily preliminary hazard monitoring in industrial safety maintenance.</p>
	]]></content:encoded>

	<dc:title>Distance-Aware Attenuation Modeling of a Helmet-Mounted Edge Thermal System Using MLX90640 and Raspberry Pi 5 for Industrial Safety Applications: Linear Regression Approach</dc:title>
			<dc:creator>Songwut Boonsong</dc:creator>
			<dc:creator>Paniti Netinant</dc:creator>
			<dc:creator>Rerkchai Fooprateepsiri</dc:creator>
			<dc:creator>Meennapa Rukhiran</dc:creator>
			<dc:creator>Manasanan Bunpalwong</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020037</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-26</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-26</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/iot7020037</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/36">

	<title>IoT, Vol. 7, Pages 36: PatternStudio: A Neuro-Symbolic Framework for Dynamic and High-Throughput Complex Event Processing</title>
	<link>https://www.mdpi.com/2624-831X/7/2/36</link>
	<description>Complex Event Processing (CEP) is essential for real-time analytics in domains such as industrial IoT, cybersecurity, and financial monitoring, yet CEP adoption is still hindered by the difficulty of authoring temporal rules and by rigid redeployment workflows. This paper presents PatternStudio, a neuro-symbolic CEP framework that translates natural language specifications into validated event-processing patterns and executes them on a deterministic Apache Flink-based runtime without interrupting service. The generative layer is constrained to produce a typed intermediate representation, while the symbolic layer enforces validation and runtime execution guarantees. We evaluate the prototype as a single-node system-characterization study on commodity hardware representative of edge and near-edge gateways rather than microcontroller-class devices. Under this setting, PatternStudio reaches 47,910 events per second at 250 active rules while maintaining a bounded memory footprint between 1.6 GB and 1.9 GB during the reported runs. Beyond 500 active rules, throughput degradation is driven primarily by CPU saturation and alert amplification, which also explains the sharp increase in tail latency. Additional measurements with parallelism 4, a static baseline, and a two-stage NL-to-IR evaluation further show that the architecture remains functional under partitioned execution, incurs moderate dynamic-orchestration overhead, preserves rule structure reliably under natural-language authoring, and supports interchangeable LLM backends at the semantic front end.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 36: PatternStudio: A Neuro-Symbolic Framework for Dynamic and High-Throughput Complex Event Processing</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/36">doi: 10.3390/iot7020036</a></p>
	<p>Authors:
		Jesús Rosa-Bilbao
		</p>
	<p>Complex Event Processing (CEP) is essential for real-time analytics in domains such as industrial IoT, cybersecurity, and financial monitoring, yet CEP adoption is still hindered by the difficulty of authoring temporal rules and by rigid redeployment workflows. This paper presents PatternStudio, a neuro-symbolic CEP framework that translates natural language specifications into validated event-processing patterns and executes them on a deterministic Apache Flink-based runtime without interrupting service. The generative layer is constrained to produce a typed intermediate representation, while the symbolic layer enforces validation and runtime execution guarantees. We evaluate the prototype as a single-node system-characterization study on commodity hardware representative of edge and near-edge gateways rather than microcontroller-class devices. Under this setting, PatternStudio reaches 47,910 events per second at 250 active rules while maintaining a bounded memory footprint between 1.6 GB and 1.9 GB during the reported runs. Beyond 500 active rules, throughput degradation is driven primarily by CPU saturation and alert amplification, which also explains the sharp increase in tail latency. Additional measurements with parallelism 4, a static baseline, and a two-stage NL-to-IR evaluation further show that the architecture remains functional under partitioned execution, incurs moderate dynamic-orchestration overhead, preserves rule structure reliably under natural-language authoring, and supports interchangeable LLM backends at the semantic front end.</p>
	]]></content:encoded>

	<dc:title>PatternStudio: A Neuro-Symbolic Framework for Dynamic and High-Throughput Complex Event Processing</dc:title>
			<dc:creator>Jesús Rosa-Bilbao</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020036</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/iot7020036</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/35">

	<title>IoT, Vol. 7, Pages 35: Privacy-Preserving Emergency Vehicle Authentication Scheme Using Zero-Knowledge Proofs and Blockchain</title>
	<link>https://www.mdpi.com/2624-831X/7/2/35</link>
	<description>Emergency vehicle authentication in vehicular ad hoc networks must satisfy strict latency, privacy, and trust constraints. Existing Public Key Infrastructure- and Conditional Privacy-Preserving Authentication-based schemes incur substantial overhead from certificate management and expensive per-hop verification, making them unsuitable for real-time emergency scenarios. We propose a lightweight zero-knowledge- and blockchain-assisted authentication scheme that eliminates certificates, pseudonym pools, and the requirement for online interaction with a trusted authority during the authentication phase. The Certificate Authority (CA) is involved only during offline initialization stages (vehicle enrollment and Merkle tree construction); once provisioning is complete, the runtime authentication process operates without any online CA interaction. Each emergency vehicle registers one-time hash commitments on-chain after proving membership in a category-specific Merkle tree, and authenticates messages by broadcasting a hash along with a zero-knowledge proof of preimage knowledge. Roadside units verify the proof and consult the on-chain state to enforce single-use semantics, creating a tamper-resistant audit trail. Evaluation using the Veins framework (OMNeT++/SUMO) demonstrated a constant 288-byte authenticated payload, millisecond-level end-to-end delay independent of hop count, and stable blockchain processing under sustained load.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 35: Privacy-Preserving Emergency Vehicle Authentication Scheme Using Zero-Knowledge Proofs and Blockchain</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/35">doi: 10.3390/iot7020035</a></p>
	<p>Authors:
		Hanshi Li
		Drishti Oza
		Masami Yoshida
		Taku Noguchi
		</p>
	<p>Emergency vehicle authentication in vehicular ad hoc networks must satisfy strict latency, privacy, and trust constraints. Existing Public Key Infrastructure- and Conditional Privacy-Preserving Authentication-based schemes incur substantial overhead from certificate management and expensive per-hop verification, making them unsuitable for real-time emergency scenarios. We propose a lightweight zero-knowledge- and blockchain-assisted authentication scheme that eliminates certificates, pseudonym pools, and the requirement for online interaction with a trusted authority during the authentication phase. The Certificate Authority (CA) is involved only during offline initialization stages (vehicle enrollment and Merkle tree construction); once provisioning is complete, the runtime authentication process operates without any online CA interaction. Each emergency vehicle registers one-time hash commitments on-chain after proving membership in a category-specific Merkle tree, and authenticates messages by broadcasting a hash along with a zero-knowledge proof of preimage knowledge. Roadside units verify the proof and consult the on-chain state to enforce single-use semantics, creating a tamper-resistant audit trail. Evaluation using the Veins framework (OMNeT++/SUMO) demonstrated a constant 288-byte authenticated payload, millisecond-level end-to-end delay independent of hop count, and stable blockchain processing under sustained load.</p>
	]]></content:encoded>

	<dc:title>Privacy-Preserving Emergency Vehicle Authentication Scheme Using Zero-Knowledge Proofs and Blockchain</dc:title>
			<dc:creator>Hanshi Li</dc:creator>
			<dc:creator>Drishti Oza</dc:creator>
			<dc:creator>Masami Yoshida</dc:creator>
			<dc:creator>Taku Noguchi</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020035</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/iot7020035</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/34">

	<title>IoT, Vol. 7, Pages 34: Transforming Opportunistic Routing: A Deep Reinforcement Learning Framework for Reliable and Energy-Efficient Communication in Mobile Cognitive Radio Sensor Networks</title>
	<link>https://www.mdpi.com/2624-831X/7/2/34</link>
	<description>The Mobile Reliable Opportunistic Routing (MROR) protocol improves data-forwarding reliability in Cognitive Radio Sensor Networks (CRSNs) through mobility-aware virtual contention groups and handover zoning. However, its heuristic decision logic is difficult to optimize under highly dynamic spectrum access and random node mobility. To address this limitation, we present DRL-MROR, a refined routing framework that incorporates deep reinforcement learning (DRL) to enable intelligent and adaptive forwarding decisions. In DRL-MROR, the secondary users (SUs) act as autonomous agents that observe local state information, including primary-user activity, link quality, residual energy, and neighbor-mobility patterns. Each agent learns a forwarding policy through a Deep Q-Network (DQN) optimized for long-term network utility in terms of throughput, delay, and energy efficiency. We formulate routing as a Markov Decision Process (MDP) and use experience replay with prioritized sampling to improve learning stability and convergence. The DQN used at each node is intentionally lightweight, requiring 5514 trainable parameters, about 21.5 kB of weight storage in 32-bit precision, and approximately 5.4k multiply-accumulate operations per inference, which supports practical deployment on edge-capable CRSN nodes. Extensive simulations show that DRL-MROR outperforms the original MROR protocol and representative AI-based routing baselines such as AIRoute under diverse operating conditions. The results indicate gains of up to 38% in throughput, 42% in goodput, a 29% reduction in energy consumed per packet, and an approximately 18% improvement in network lifetime, while maintaining high route stability and fairness. DRL-MROR also reduces control overhead by about 30% and average end-to-end delay by up to 32%, maintaining strong performance even under elevated PU activity and higher node mobility. 
These results show that augmenting opportunistic routing with lightweight DRL can substantially improve adaptability and efficiency in next-generation IoT-oriented CRSNs.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 34: Transforming Opportunistic Routing: A Deep Reinforcement Learning Framework for Reliable and Energy-Efficient Communication in Mobile Cognitive Radio Sensor Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/34">doi: 10.3390/iot7020034</a></p>
	<p>Authors:
		Suleiman Zubair
		Bala Alhaji Salihu
		Altyeb Altaher Taha
		Yakubu Suleiman Baguda
		Ahmed Hamza Osman
		Asif Hassan Syed
		</p>
	<p>The Mobile Reliable Opportunistic Routing (MROR) protocol improves data-forwarding reliability in Cognitive Radio Sensor Networks (CRSNs) through mobility-aware virtual contention groups and handover zoning. However, its heuristic decision logic is difficult to optimize under highly dynamic spectrum access and random node mobility. To address this limitation, we present DRL-MROR, a refined routing framework that incorporates deep reinforcement learning (DRL) to enable intelligent and adaptive forwarding decisions. In DRL-MROR, the secondary users (SUs) act as autonomous agents that observe local state information, including primary-user activity, link quality, residual energy, and neighbor-mobility patterns. Each agent learns a forwarding policy through a Deep Q-Network (DQN) optimized for long-term network utility in terms of throughput, delay, and energy efficiency. We formulate routing as a Markov Decision Process (MDP) and use experience replay with prioritized sampling to improve learning stability and convergence. The DQN used at each node is intentionally lightweight, requiring 5514 trainable parameters, about 21.5 kB of weight storage in 32-bit precision, and approximately 5.4k multiply-accumulate operations per inference, which supports practical deployment on edge-capable CRSN nodes. Extensive simulations show that DRL-MROR outperforms the original MROR protocol and representative AI-based routing baselines such as AIRoute under diverse operating conditions. The results indicate gains of up to 38% in throughput, 42% in goodput, a 29% reduction in energy consumed per packet, and an approximately 18% improvement in network lifetime, while maintaining high route stability and fairness. DRL-MROR also reduces control overhead by about 30% and average end-to-end delay by up to 32%, maintaining strong performance even under elevated PU activity and higher node mobility. 
These results show that augmenting opportunistic routing with lightweight DRL can substantially improve adaptability and efficiency in next-generation IoT-oriented CRSNs.</p>
	]]></content:encoded>

	<dc:title>Transforming Opportunistic Routing: A Deep Reinforcement Learning Framework for Reliable and Energy-Efficient Communication in Mobile Cognitive Radio Sensor Networks</dc:title>
			<dc:creator>Suleiman Zubair</dc:creator>
			<dc:creator>Bala Alhaji Salihu</dc:creator>
			<dc:creator>Altyeb Altaher Taha</dc:creator>
			<dc:creator>Yakubu Suleiman Baguda</dc:creator>
			<dc:creator>Ahmed Hamza Osman</dc:creator>
			<dc:creator>Asif Hassan Syed</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020034</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/iot7020034</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/33">

	<title>IoT, Vol. 7, Pages 33: Edge AI Bridge: A Micro-Layer Intrusion Detection Architecture for Smart-City IoT Networks</title>
	<link>https://www.mdpi.com/2624-831X/7/2/33</link>
	<description>Smart-city IoT ecosystems depend on a large number of devices with limited resources, which often lack built-in security mechanisms. While traditional cloud-based or gateway-centric intrusion detection systems (IDSs) offer essential security, they are still characterized by high detection latency, considerable bandwidth demand, and a lack of precise monitoring of single device actions. This study proposes the Edge AI Bridge, a novel micro-computing security layer positioned between IoT devices and the gateway to enable early-stage threat interception. The architecture integrates embedded AI hardware with a hybrid pipeline, utilizing unsupervised anomaly detection for behavioral profiling and a lightweight signature-matching module to minimize false positives. System operations—including localized traffic inspection, protocol parsing, and feature extraction—are performed before data aggregation, which preserves device-level privacy and reduces the computational burden on the IoT gateway. The contemporary CIC-IoT-2023 dataset, which captures a wide range of smart-city protocols and attack vectors, is used to evaluate the architecture. The Edge AI Bridge leads to a significant reduction in detection latency—≈50 ms on average as opposed to the 500 ms of cloud-based solutions—while the resource footprint is kept low to about 20% CPU utilization. The Edge AI Bridge demonstrates a potential solution that is scalable, modular, and can preserve privacy while improving the cyber resilience of the smart-city infrastructures that are large, heterogeneous, and difficult to manage.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 33: Edge AI Bridge: A Micro-Layer Intrusion Detection Architecture for Smart-City IoT Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/33">doi: 10.3390/iot7020033</a></p>
	<p>Authors:
		Sethu Subramanian N
		Prabu P
		Kurunandan Jain
		Prabhakar Krishnan
		</p>
	<p>Smart-city IoT ecosystems depend on a large number of devices with limited resources, which often lack built-in security mechanisms. While traditional cloud-based or gateway-centric intrusion detection systems (IDSs) offer essential security, they are still characterized by high detection latency, considerable bandwidth demand, and a lack of precise monitoring of single device actions. This study proposes the Edge AI Bridge, a novel micro-computing security layer positioned between IoT devices and the gateway to enable early-stage threat interception. The architecture integrates embedded AI hardware with a hybrid pipeline, utilizing unsupervised anomaly detection for behavioral profiling and a lightweight signature-matching module to minimize false positives. System operations&mdash;including localized traffic inspection, protocol parsing, and feature extraction&mdash;are performed before data aggregation, which preserves device-level privacy and reduces the computational burden on the IoT gateway. The contemporary CIC-IoT-2023 dataset, which captures a wide range of smart-city protocols and attack vectors, is used to evaluate the architecture. The Edge AI Bridge leads to a significant reduction in detection latency&mdash;&asymp;50 ms on average as opposed to the 500 ms of cloud-based solutions&mdash;while the resource footprint is kept low to about 20% CPU utilization. The Edge AI Bridge demonstrates a potential solution that is scalable, modular, and can preserve privacy while improving the cyber resilience of the smart-city infrastructures that are large, heterogeneous, and difficult to manage.</p>
	]]></content:encoded>

	<dc:title>Edge AI Bridge: A Micro-Layer Intrusion Detection Architecture for Smart-City IoT Networks</dc:title>
			<dc:creator>Sethu Subramanian N</dc:creator>
			<dc:creator>Prabu P</dc:creator>
			<dc:creator>Kurunandan Jain</dc:creator>
			<dc:creator>Prabhakar Krishnan</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020033</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/iot7020033</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/32">

	<title>IoT, Vol. 7, Pages 32: Intelligent Railway Wagon Health Assessment Using IoT Sensors and Predictive Analytics for Safety-Critical Applications</title>
	<link>https://www.mdpi.com/2624-831X/7/2/32</link>
	<description>The safety and reliability of railway wagon operations largely depend on the timely detection of degradation in safety-critical components such as axle bearings, wheelsets, and braking systems. Conventional maintenance strategies based on fixed inspection intervals are often inadequate for capturing the actual operating conditions of wagon components, leading to delayed fault detection or unnecessary maintenance actions. To address these limitations, this paper proposes a sensor-based health assessment framework for the continuous monitoring of railway wagons under operational conditions. The proposed framework integrates multi-sensor data acquisition, systematic signal preprocessing, feature-based health indicator construction, and temporal degradation analysis to evaluate component health in real time. A safety-oriented decision logic is employed to classify operating conditions and generate reliable alerts while minimizing false detections caused by transient disturbances. The effectiveness of the proposed approach is validated using a publicly available run-to-failure bearing dataset that exhibits degradation characteristics similar to those observed in railway wagon axle bearings. Experimental results demonstrate that the proposed framework achieves improved classification accuracy, higher detection reliability, reduced false alarm rates, and lower detection latency compared to representative existing condition monitoring approaches. In addition, the computational efficiency of the proposed model confirms its suitability for real-time deployment. The results indicate that the proposed health assessment framework provides a practical and reliable solution for safety-critical railway wagon monitoring and forms a strong foundation for future extensions toward predictive maintenance and remaining useful life estimation.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 32: Intelligent Railway Wagon Health Assessment Using IoT Sensors and Predictive Analytics for Safety-Critical Applications</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/32">doi: 10.3390/iot7020032</a></p>
	<p>Authors:
		Shiva Kumar Mysore Gangadhara
		Krishna Alabhujanahalli Neelegowda
		Anitha Arekattedoddi Chikkalingaiah
		Naveena Chikkaguddaiah
		</p>
	<p>The safety and reliability of railway wagon operations largely depend on the timely detection of degradation in safety-critical components such as axle bearings, wheelsets, and braking systems. Conventional maintenance strategies based on fixed inspection intervals are often inadequate for capturing the actual operating conditions of wagon components, leading to delayed fault detection or unnecessary maintenance actions. To address these limitations, this paper proposes a sensor-based health assessment framework for the continuous monitoring of railway wagons under operational conditions. The proposed framework integrates multi-sensor data acquisition, systematic signal preprocessing, feature-based health indicator construction, and temporal degradation analysis to evaluate component health in real time. A safety-oriented decision logic is employed to classify operating conditions and generate reliable alerts while minimizing false detections caused by transient disturbances. The effectiveness of the proposed approach is validated using a publicly available run-to-failure bearing dataset that exhibits degradation characteristics similar to those observed in railway wagon axle bearings. Experimental results demonstrate that the proposed framework achieves improved classification accuracy, higher detection reliability, reduced false alarm rates, and lower detection latency compared to representative existing condition monitoring approaches. In addition, the computational efficiency of the proposed model confirms its suitability for real-time deployment. The results indicate that the proposed health assessment framework provides a practical and reliable solution for safety-critical railway wagon monitoring and forms a strong foundation for future extensions toward predictive maintenance and remaining useful life estimation.</p>
	]]></content:encoded>

	<dc:title>Intelligent Railway Wagon Health Assessment Using IoT Sensors and Predictive Analytics for Safety-Critical Applications</dc:title>
			<dc:creator>Shiva Kumar Mysore Gangadhara</dc:creator>
			<dc:creator>Krishna Alabhujanahalli Neelegowda</dc:creator>
			<dc:creator>Anitha Arekattedoddi Chikkalingaiah</dc:creator>
			<dc:creator>Naveena Chikkaguddaiah</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020032</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/iot7020032</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/31">

	<title>IoT, Vol. 7, Pages 31: Cryptanalysis and Improvement of the SMEP-IoV Protocol: A Secure and Lightweight Protocol for Message Exchange in IoV Paradigm</title>
	<link>https://www.mdpi.com/2624-831X/7/2/31</link>
	<description>The Internet of Vehicles (IoV) is a rapidly evolving technology that provides real-time connectivity, enhanced road safety, and reduced traffic congestion; however, its inherently open communication channels expose it to serious security and privacy threats. In 2021, Chaudhry proposed SMEP-IoV, a lightweight message authentication protocol designed to satisfy essential security requirements. This paper presents a comprehensive security analysis of SMEP-IoV and reveals several serious vulnerabilities. Specifically, sensitive credentials are stored in plaintext without tamper-resistant protection, and both authentication and session key derivation depend directly on these credentials. These structural flaws allow an adversary to extract the stored secrets, generate valid authentication messages, and derive the established session key, enabling vehicle impersonation and session key disclosure attacks. Moreover, compromise of long-term secrets facilitates key compromise impersonation attacks. It also fails to ensure anonymity and perfect forward secrecy. To address these issues, we propose an enhanced authentication protocol for resource-constrained IoV environments, leveraging a three-factor authentication mechanism combined with lightweight cryptographic primitives. Formal security analyses using BAN logic, Tamarin, and ProVerif confirm its resilience against known attacks, while NS-3 simulations validate its scalability, high throughput, and low End-to-End Delay (E2ED). The results highlight the protocol as a robust, efficient, and scalable solution for large-scale IoV deployments.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 31: Cryptanalysis and Improvement of the SMEP-IoV Protocol: A Secure and Lightweight Protocol for Message Exchange in IoV Paradigm</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/31">doi: 10.3390/iot7020031</a></p>
	<p>Authors:
		Gelare Oudi Ghadim
		Parvin Rastegari
		Mohammad Dakhilalian
		Faramarz Hendessi
		Shahrzad Saremi
		Rania Shibl
		Yassine Himeur
		Shadi Atalla
		Wathiq Mansoor
		</p>
	<p>The Internet of Vehicles (IoV) is a rapidly evolving technology that provides real-time connectivity, enhanced road safety, and reduced traffic congestion; however, its inherently open communication channels expose it to serious security and privacy threats. In 2021, Chaudhry proposed SMEP-IoV, a lightweight message authentication protocol designed to satisfy essential security requirements. This paper presents a comprehensive security analysis of SMEP-IoV and reveals several serious vulnerabilities. Specifically, sensitive credentials are stored in plaintext without tamper-resistant protection, and both authentication and session key derivation depend directly on these credentials. These structural flaws allow an adversary to extract the stored secrets, generate valid authentication messages, and derive the established session key, enabling vehicle impersonation and session key disclosure attacks. Moreover, compromise of long-term secrets facilitates key compromise impersonation attacks. It also fails to ensure anonymity and perfect forward secrecy. To address these issues, we propose an enhanced authentication protocol for resource-constrained IoV environments, leveraging a three-factor authentication mechanism combined with lightweight cryptographic primitives. Formal security analyses using BAN logic, Tamarin, and ProVerif confirm its resilience against known attacks, while NS-3 simulations validate its scalability, high throughput, and low End-to-End Delay (E2ED). The results highlight the protocol as a robust, efficient, and scalable solution for large-scale IoV deployments.</p>
	]]></content:encoded>

	<dc:title>Cryptanalysis and Improvement of the SMEP-IoV Protocol: A Secure and Lightweight Protocol for Message Exchange in IoV Paradigm</dc:title>
			<dc:creator>Gelare Oudi Ghadim</dc:creator>
			<dc:creator>Parvin Rastegari</dc:creator>
			<dc:creator>Mohammad Dakhilalian</dc:creator>
			<dc:creator>Faramarz Hendessi</dc:creator>
			<dc:creator>Shahrzad Saremi</dc:creator>
			<dc:creator>Rania Shibl</dc:creator>
			<dc:creator>Yassine Himeur</dc:creator>
			<dc:creator>Shadi Atalla</dc:creator>
			<dc:creator>Wathiq Mansoor</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020031</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/iot7020031</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/2/30">

	<title>IoT, Vol. 7, Pages 30: Optimal Security Task Offloading in Cognitive IoT Networks: Provably Optimal Threshold Policies and Model-Free Learning</title>
	<link>https://www.mdpi.com/2624-831X/7/2/30</link>
	<description>The proliferation of Internet of Things (IoT) devices has introduced significant security challenges. Resource-constrained devices face sophisticated threats but lack the computational capacity for advanced security analysis. This study investigates optimal security task allocation in Cognitive IoT (CIoT) networks. It specifically examines when IoT devices should process security tasks locally or offload them to Mobile Edge Computing (MEC) servers. The problem is formulated as a Continuous-Time Markov Decision Process (CTMDP). The study demonstrates that the optimal offloading policy has a threshold structure. Security tasks are offloaded to MEC servers when the offloading queue length is below a critical threshold, k∗. Otherwise, tasks are processed locally. This structural property is robust to changes in MEC server configurations and threat arrival patterns. It ensures an optimal and easily implementable security policy under the exponential model. Theoretical analysis establishes upper bounds on the performance of AI-based security controllers using the same models. The results also show that standard model-free Q-learning algorithms can recover optimal thresholds without any prior knowledge of the system parameters. Simulations across multiple reinforcement learning architectures, including Q-learning, State–Action–Reward–State–Action (SARSA), and Deep Q-networks (DQN), confirm that all methods converge to the predicted threshold. This empirically validates the analytical findings. The threshold structure remains effective under practical imperfections such as imperfect sensing and parameter estimation errors. Systems maintain 85% to 93% of their optimal performance. This work extends threshold Markov Decision Process (MDP) analysis from classical queuing theory to the context of CIoT security offloading. 
It provides optimal and practical policies and model-free algorithms for use by resource-constrained devices.</description>
	<pubDate>2026-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 30: Optimal Security Task Offloading in Cognitive IoT Networks: Provably Optimal Threshold Policies and Model-Free Learning</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/2/30">doi: 10.3390/iot7020030</a></p>
	<p>Authors:
		Ning Wang
		Yali Ren
		</p>
	<p>The proliferation of Internet of Things (IoT) devices has introduced significant security challenges. Resource-constrained devices face sophisticated threats but lack the computational capacity for advanced security analysis. This study investigates optimal security task allocation in Cognitive IoT (CIoT) networks. It specifically examines when IoT devices should process security tasks locally or offload them to Mobile Edge Computing (MEC) servers. The problem is formulated as a Continuous-Time Markov Decision Process (CTMDP). The study demonstrates that the optimal offloading policy has a threshold structure. Security tasks are offloaded to MEC servers when the offloading queue length is below a critical threshold, k∗. Otherwise, tasks are processed locally. This structural property is robust to changes in MEC server configurations and threat arrival patterns. It ensures an optimal and easily implementable security policy under the exponential model. Theoretical analysis establishes upper bounds on the performance of AI-based security controllers using the same models. The results also show that standard model-free Q-learning algorithms can recover optimal thresholds without any prior knowledge of the system parameters. Simulations across multiple reinforcement learning architectures, including Q-learning, State–Action–Reward–State–Action (SARSA), and Deep Q-networks (DQN), confirm that all methods converge to the predicted threshold. This empirically validates the analytical findings. The threshold structure remains effective under practical imperfections such as imperfect sensing and parameter estimation errors. Systems maintain 85% to 93% of their optimal performance. This work extends threshold Markov Decision Process (MDP) analysis from classical queuing theory to the context of CIoT security offloading. 
It provides optimal and practical policies and model-free algorithms for use by resource-constrained devices.</p>
	]]></content:encoded>

	<dc:title>Optimal Security Task Offloading in Cognitive IoT Networks: Provably Optimal Threshold Policies and Model-Free Learning</dc:title>
			<dc:creator>Ning Wang</dc:creator>
			<dc:creator>Yali Ren</dc:creator>
		<dc:identifier>doi: 10.3390/iot7020030</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-26</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-26</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/iot7020030</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/2/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/29">

	<title>IoT, Vol. 7, Pages 29: EEDC: Energy-Efficient Distance-Controlled Clustering for Bottleneck Avoidance in Wireless Sensor Networks</title>
	<link>https://www.mdpi.com/2624-831X/7/1/29</link>
	<description>Wireless Sensor Networks (WSNs) commonly employ clustering to improve scalability and energy efficiency; however, cluster heads (CHs) located near the base station (BS) often suffer from excessive relay traffic, leading to rapid energy depletion and reduced network lifetime. This article proposes an Energy-Efficient Distance-Controlled Clustering (EEDC) scheme that adjusts CH density and transmission power according to each node’s distance from the BS. In EEDC, a higher number of CHs is deployed near the BS to balance forwarding loads, while fewer CHs are selected in distant regions to conserve energy. Additionally, CHs adapt their transmission power to enable distance-proportional communication. A mathematical model is developed to analyze the relationship between CH distribution, transmission power, and overall energy consumption. Performance evaluation is conducted through simulations and compared with LEACH, HEED, DEEC, SEP, and EECS. The results show that EEDC improves the stability period by up to 42%, extends network lifetime by 23%, increases average residual energy by 13–29%, enhances throughput by 16–44%, and achieves 23–61% higher packet delivery efficiency. Moreover, cumulative CH energy consumption is reduced by 5–21%, leading to more balanced energy distribution. These findings indicate that distance-controlled CH selection and adaptive transmission power effectively alleviate the BS energy bottleneck and enhance the energy efficiency and operational longevity of clustered WSNs.</description>
	<pubDate>2026-03-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 29: EEDC: Energy-Efficient Distance-Controlled Clustering for Bottleneck Avoidance in Wireless Sensor Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/29">doi: 10.3390/iot7010029</a></p>
	<p>Authors:
		Ahmad Abuashour
		Yahia Jazyah
		Naser Zaeri
		</p>
	<p>Wireless Sensor Networks (WSNs) commonly employ clustering to improve scalability and energy efficiency; however, cluster heads (CHs) located near the base station (BS) often suffer from excessive relay traffic, leading to rapid energy depletion and reduced network lifetime. This article proposes an Energy-Efficient Distance-Controlled Clustering (EEDC) scheme that adjusts CH density and transmission power according to each node’s distance from the BS. In EEDC, a higher number of CHs is deployed near the BS to balance forwarding loads, while fewer CHs are selected in distant regions to conserve energy. Additionally, CHs adapt their transmission power to enable distance-proportional communication. A mathematical model is developed to analyze the relationship between CH distribution, transmission power, and overall energy consumption. Performance evaluation is conducted through simulations and compared with LEACH, HEED, DEEC, SEP, and EECS. The results show that EEDC improves the stability period by up to 42%, extends network lifetime by 23%, increases average residual energy by 13–29%, enhances throughput by 16–44%, and achieves 23–61% higher packet delivery efficiency. Moreover, cumulative CH energy consumption is reduced by 5–21%, leading to more balanced energy distribution. These findings indicate that distance-controlled CH selection and adaptive transmission power effectively alleviate the BS energy bottleneck and enhance the energy efficiency and operational longevity of clustered WSNs.</p>
	]]></content:encoded>

	<dc:title>EEDC: Energy-Efficient Distance-Controlled Clustering for Bottleneck Avoidance in Wireless Sensor Networks</dc:title>
			<dc:creator>Ahmad Abuashour</dc:creator>
			<dc:creator>Yahia Jazyah</dc:creator>
			<dc:creator>Naser Zaeri</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010029</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-15</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-15</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/iot7010029</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/28">

	<title>IoT, Vol. 7, Pages 28: IoT-Assisted Hydroponic System for Andrographis paniculata: Enhanced Productivity and Pharmaceutical-Grade Quality</title>
	<link>https://www.mdpi.com/2624-831X/7/1/28</link>
	<description>This study presents an Internet of Things (IoT)-assisted semi-open hydroponic system for cultivating Andrographis paniculata under tropical conditions, aiming to enhance biomass productivity, andrographolide (AG) yield, and production efficiency. IoT-assisted hydroponics, non-IoT hydroponics, and soil-based cultivation were compared in 10 m2 greenhouses. The IoT system enabled real-time monitoring and adaptive regulation of temperature, relative humidity, light intensity, nutrient solution pH, and electrical conductivity (EC). IoT-assisted hydroponics achieved earlier harvest (≈90 days) and the highest fresh biomass yield (0.409 ± 0.014 kg m−2) while maintaining per-plant productivity (15.74 ± 0.54 g plant−1) comparable to soil-based cultivation. Andrographolide concentration reached 25.58 ± 3.36 mg g−1 DW (2.56% w/w), meeting pharmacopeial requirements. Owing to stable environmental regulation and tolerance to high planting density, the IoT system produced the highest areal AG productivity (209.5 mg m−2), representing a four- to tenfold increase over the other systems. Despite higher operational costs, IoT-assisted hydroponics achieved the lowest AG unit cost (≈6.77 USD g−1). While most previous studies emphasize tissue-level AG concentration, system-level productivity and cost efficiency under realistic cultivation conditions remain insufficiently explored. Overall, IoT-enabled semi-open hydroponics provides a scalable and economically viable approach for medicinal plant production, bridging the gap between open-field cultivation and fully controlled plant factory systems.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 28: IoT-Assisted Hydroponic System for Andrographis paniculata: Enhanced Productivity and Pharmaceutical-Grade Quality</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/28">doi: 10.3390/iot7010028</a></p>
	<p>Authors:
		Krit Funsian
		Yaowarat Sirisathitkul
		Pumiphat Khotchanakhen
		Apiwit Bunta
		Kanittha Srikwan
		Kingkan Bunluepuech
		Athakorn Promwee
		Chih-Yi Chiu
		Karanrat Thammarak
		</p>
	<p>This study presents an Internet of Things (IoT)-assisted semi-open hydroponic system for cultivating Andrographis paniculata under tropical conditions, aiming to enhance biomass productivity, andrographolide (AG) yield, and production efficiency. IoT-assisted hydroponics, non-IoT hydroponics, and soil-based cultivation were compared in 10 m2 greenhouses. The IoT system enabled real-time monitoring and adaptive regulation of temperature, relative humidity, light intensity, nutrient solution pH, and electrical conductivity (EC). IoT-assisted hydroponics achieved earlier harvest (≈90 days) and the highest fresh biomass yield (0.409 ± 0.014 kg m−2) while maintaining per-plant productivity (15.74 ± 0.54 g plant−1) comparable to soil-based cultivation. Andrographolide concentration reached 25.58 ± 3.36 mg g−1 DW (2.56% w/w), meeting pharmacopeial requirements. Owing to stable environmental regulation and tolerance to high planting density, the IoT system produced the highest areal AG productivity (209.5 mg m−2), representing a four- to tenfold increase over the other systems. Despite higher operational costs, IoT-assisted hydroponics achieved the lowest AG unit cost (≈6.77 USD g−1). While most previous studies emphasize tissue-level AG concentration, system-level productivity and cost efficiency under realistic cultivation conditions remain insufficiently explored. Overall, IoT-enabled semi-open hydroponics provides a scalable and economically viable approach for medicinal plant production, bridging the gap between open-field cultivation and fully controlled plant factory systems.</p>
	]]></content:encoded>

	<dc:title>IoT-Assisted Hydroponic System for Andrographis paniculata: Enhanced Productivity and Pharmaceutical-Grade Quality</dc:title>
			<dc:creator>Krit Funsian</dc:creator>
			<dc:creator>Yaowarat Sirisathitkul</dc:creator>
			<dc:creator>Pumiphat Khotchanakhen</dc:creator>
			<dc:creator>Apiwit Bunta</dc:creator>
			<dc:creator>Kanittha Srikwan</dc:creator>
			<dc:creator>Kingkan Bunluepuech</dc:creator>
			<dc:creator>Athakorn Promwee</dc:creator>
			<dc:creator>Chih-Yi Chiu</dc:creator>
			<dc:creator>Karanrat Thammarak</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010028</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/iot7010028</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/27">

	<title>IoT, Vol. 7, Pages 27: Understanding Energy Efficiency of AI Deployments in IoT-Driven Smart Cities</title>
	<link>https://www.mdpi.com/2624-831X/7/1/27</link>
	<description>The pervasive adoption of AI and AIoT applications at the network edge presents both opportunities and challenges for smart cities. With a focus on the energy efficiency of AI in urban environments, this paper provides a systematic comparative analysis of representative edge hardware platforms, i.e., embedded GPUs, FPGAs, and ultra-low-power microcontroller-/sensor-class devices, assessing their suitability for AI workloads in IoT-driven smart city infrastructures. The evaluation, based on direct characterization of diverse neural networks and relevant datasets, quantifies computational performance and energy behavior through inference latency, throughput, and energy/per inference measurements. Across the evaluated network–board pairs, the measured inference power spans several orders of magnitude, ranging from 0.1–10 mW for ultra-low-power Intelligent Sensor Processing Units (ISPUs) up to 1–10 W for embedded GPUs, highlighting the wide design space between the least and most power-demanding configurations. Results indicate that embedded GPUs provide a favorable performance-to-power ratio for computationally intensive workloads, while MCU/ISPU-class solutions, despite throughput limitations, offer compelling advantages in ultra-low-power scenarios when combined with quantization and pruning, making them well-suited for distributed sensing and actuation typical of smart city deployments. Overall, this comparative analysis guides hardware selection for heterogeneous, sustainable AI-enabled urban services.</description>
	<pubDate>2026-03-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 27: Understanding Energy Efficiency of AI Deployments in IoT-Driven Smart Cities</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/27">doi: 10.3390/iot7010027</a></p>
	<p>Authors:
		Salvatore Bramante
		Filippo Ferrandino
		Alessandro Cilardo
		</p>
	<p>The pervasive adoption of AI and AIoT applications at the network edge presents both opportunities and challenges for smart cities. With a focus on the energy efficiency of AI in urban environments, this paper provides a systematic comparative analysis of representative edge hardware platforms, i.e., embedded GPUs, FPGAs, and ultra-low-power microcontroller-/sensor-class devices, assessing their suitability for AI workloads in IoT-driven smart city infrastructures. The evaluation, based on direct characterization of diverse neural networks and relevant datasets, quantifies computational performance and energy behavior through inference latency, throughput, and energy/per inference measurements. Across the evaluated network–board pairs, the measured inference power spans several orders of magnitude, ranging from 0.1–10 mW for ultra-low-power Intelligent Sensor Processing Units (ISPUs) up to 1–10 W for embedded GPUs, highlighting the wide design space between the least and most power-demanding configurations. Results indicate that embedded GPUs provide a favorable performance-to-power ratio for computationally intensive workloads, while MCU/ISPU-class solutions, despite throughput limitations, offer compelling advantages in ultra-low-power scenarios when combined with quantization and pruning, making them well-suited for distributed sensing and actuation typical of smart city deployments. Overall, this comparative analysis guides hardware selection for heterogeneous, sustainable AI-enabled urban services.</p>
	]]></content:encoded>

	<dc:title>Understanding Energy Efficiency of AI Deployments in IoT-Driven Smart Cities</dc:title>
			<dc:creator>Salvatore Bramante</dc:creator>
			<dc:creator>Filippo Ferrandino</dc:creator>
			<dc:creator>Alessandro Cilardo</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010027</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-08</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-08</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/iot7010027</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/26">

	<title>IoT, Vol. 7, Pages 26: Automated Framework for Testing Random Number Generators for IoT Security Applications Using NIST SP 800-22</title>
	<link>https://www.mdpi.com/2624-831X/7/1/26</link>
	<description>The continuous expansion of the Internet of Things (IoT) has intensified the need to evaluate and guarantee the quality of entropy sources used in random number generation, an essential element in securing communications used in IoT ecosystems. This work presents an automated and web-based framework designed to execute and analyze the results of statistical tests defined in the NIST SP 800-22 standard, enabling systematic assessment of entropy sources and random numbers generators in IoT devices and environments. The proposed system integrates a Python-based backend built upon an optimized implementation of the original NIST suite, along with an intuitive web interface that facilitates configuration, monitoring, and parallel execution of tests through Representational State Transfer (REST) endpoints. Session management based on Redis ensures reliable and concurrent operation of multiple users or devices while maintaining isolation and data integrity. To demonstrate its applicability, an emulated IoT ecosystem was implemented in which multiple virtual devices periodically and asynchronously request real-time validation of their local random numbers generators. The obtained results confirm the system’s capability to detect deficiencies in pseudo random generators and validate true random number sources, highlighting its potential as a diagnostic and verification tool for distributed IoT security systems. The tool developed in this work is fully accessible to the public, allowing researchers, engineers, and practitioners to evaluate random number generators without requiring specialized hardware or proprietary software.</description>
	<pubDate>2026-03-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 26: Automated Framework for Testing Random Number Generators for IoT Security Applications Using NIST SP 800-22</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/26">doi: 10.3390/iot7010026</a></p>
	<p>Authors:
		Juan Castillo
		Pere Aran Vila
		Francisco Palacio
		Blas Garrido
		Sergi Hernández
		Albert Cirera
		</p>
	<p>The continuous expansion of the Internet of Things (IoT) has intensified the need to evaluate and guarantee the quality of entropy sources used in random number generation, an essential element in securing communications used in IoT ecosystems. This work presents an automated and web-based framework designed to execute and analyze the results of statistical tests defined in the NIST SP 800-22 standard, enabling systematic assessment of entropy sources and random numbers generators in IoT devices and environments. The proposed system integrates a Python-based backend built upon an optimized implementation of the original NIST suite, along with an intuitive web interface that facilitates configuration, monitoring, and parallel execution of tests through Representational State Transfer (REST) endpoints. Session management based on Redis ensures reliable and concurrent operation of multiple users or devices while maintaining isolation and data integrity. To demonstrate its applicability, an emulated IoT ecosystem was implemented in which multiple virtual devices periodically and asynchronously request real-time validation of their local random numbers generators. The obtained results confirm the system’s capability to detect deficiencies in pseudo random generators and validate true random number sources, highlighting its potential as a diagnostic and verification tool for distributed IoT security systems. The tool developed in this work is fully accessible to the public, allowing researchers, engineers, and practitioners to evaluate random number generators without requiring specialized hardware or proprietary software.</p>
	]]></content:encoded>

	<dc:title>Automated Framework for Testing Random Number Generators for IoT Security Applications Using NIST SP 800-22</dc:title>
			<dc:creator>Juan Castillo</dc:creator>
			<dc:creator>Pere Aran Vila</dc:creator>
			<dc:creator>Francisco Palacio</dc:creator>
			<dc:creator>Blas Garrido</dc:creator>
			<dc:creator>Sergi Hernández</dc:creator>
			<dc:creator>Albert Cirera</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010026</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-07</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/iot7010026</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/25">

	<title>IoT, Vol. 7, Pages 25: A Foundational Edge-AI Sensing Framework for Occupancy-Driven Energy Management in SMOs</title>
	<link>https://www.mdpi.com/2624-831X/7/1/25</link>
	<description>Occupant presence is a primary driver of Heating, Ventilation, and Air Conditioning (HVAC) and lighting energy consumption in office environments. Existing occupancy-sensing solutions often rely on privacy-sensitive modalities or require costly infrastructure, limiting their applicability in Small and Medium Offices (SMOs). To address these limitations, this study proposes a lightweight CSI-based occupancy-sensing framework based on a dual-core ESP32-S3 architecture, enabling concurrent CSI processing, environmental sensing, and cloud communication. A multi-stage signal preprocessing pipeline compresses raw CSI streams into a compact 56×8 statistical feature matrix, achieving 98.86% classification accuracy for multi-level occupancy estimation. Compared with image-based baselines such as DenseNet121, the proposed approach reduces input data size to 24 kB and model parameters to 138 K, yielding over 129× reduction in transmission volume without sacrificing performance. These results demonstrate that the proposed framework provides a practical, privacy-preserving, and edge-deployable solution for occupancy-aware energy management in SMOs.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 25: A Foundational Edge-AI Sensing Framework for Occupancy-Driven Energy Management in SMOs</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/25">doi: 10.3390/iot7010025</a></p>
	<p>Authors:
		Yutong Chen
		Daisuke Sumiyoshi
		Xiangyu Wang
		Takahiro Yamamoto
		Takahiro Ueno
		Jewon Oh
		</p>
	<p>Occupant presence is a primary driver of Heating, Ventilation, and Air Conditioning (HVAC) and lighting energy consumption in office environments. Existing occupancy-sensing solutions often rely on privacy-sensitive modalities or require costly infrastructure, limiting their applicability in Small and Medium Offices (SMOs). To address these limitations, this study proposes a lightweight CSI-based occupancy-sensing framework based on a dual-core ESP32-S3 architecture, enabling concurrent CSI processing, environmental sensing, and cloud communication. A multi-stage signal preprocessing pipeline compresses raw CSI streams into a compact 56×8 statistical feature matrix, achieving 98.86% classification accuracy for multi-level occupancy estimation. Compared with image-based baselines such as DenseNet121, the proposed approach reduces input data size to 24 kB and model parameters to 138 K, yielding over 129× reduction in transmission volume without sacrificing performance. These results demonstrate that the proposed framework provides a practical, privacy-preserving, and edge-deployable solution for occupancy-aware energy management in SMOs.</p>
	]]></content:encoded>

	<dc:title>A Foundational Edge-AI Sensing Framework for Occupancy-Driven Energy Management in SMOs</dc:title>
			<dc:creator>Yutong Chen</dc:creator>
			<dc:creator>Daisuke Sumiyoshi</dc:creator>
			<dc:creator>Xiangyu Wang</dc:creator>
			<dc:creator>Takahiro Yamamoto</dc:creator>
			<dc:creator>Takahiro Ueno</dc:creator>
			<dc:creator>Jewon Oh</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010025</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/iot7010025</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/24">

	<title>IoT, Vol. 7, Pages 24: A Novel Hybrid Opcode Feature Selection Framework for Efficient and Effective IoT Malware Detection</title>
	<link>https://www.mdpi.com/2624-831X/7/1/24</link>
	<description>Malware&amp;rsquo;s proliferation in the Internet of Things (IoT) ecosystem requires precise, efficient detection systems capable of operating on IoT devices. Existing static analysis approaches often fail due to computational inefficiency stemming from high feature dimensionality inherent in raw opcode features. This research addresses this limitation by proposing a novel machine-learning (ML)-driven Intelligent Hybrid Feature Selection (IHFS) framework with two distinct architectures. IHFS1 combines a filter method (variance threshold) with an embedded method (LGBM feature importance). Conversely, IHFS2 integrates variance thresholding with a wrapper method (Recursive Feature Elimination with Cross-Validation using LGBM) for optimal selection. This framework is specifically designed to select an optimally stable and minimal feature subset from the initial 1183 opcode frequency vector extracted from ARM binaries. Applying this framework to a multi-family IoT malware dataset, the IHFS architectures yielded distinct and highly efficient feature subsets: IHFS1 achieved a 95.77% reduction (to 50 features), while IHFS2 attained a 98.06% reduction (to 23 features). Evaluation across eight ML models confirmed that the Random Forest (with IHFS1 subset) and Decision Tree (with IHFS2 subset) classifiers were the best performing, achieving robust classification metrics that outperform current state-of-the-art solutions. The Decision Tree model demonstrated exceptional detection capabilities, with an accuracy of 99.87%, a precision of 99.82%, a recall of 99.88%, and an F1-score of 99.85%. It achieved an average inference time of 0.058 ms per sample. Experimental results attained on a native ARM64 environment validate the deployment feasibility of the proposed system for resource-constrained IoT devices, such as the Raspberry Pi. 
The proposed system achieves a high-throughput, low-overhead security posture while maintaining host operational stability, processing a single ELF binary in just 3.431 ms.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 24: A Novel Hybrid Opcode Feature Selection Framework for Efficient and Effective IoT Malware Detection</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/24">doi: 10.3390/iot7010024</a></p>
	<p>Authors:
		Bakhan Tofiq Ahmed
		Noor Ghazi M. Jameel
		Bakhtiar Ibrahim Saeed
		</p>
	<p>Malware&rsquo;s proliferation in the Internet of Things (IoT) ecosystem requires precise, efficient detection systems capable of operating on IoT devices. Existing static analysis approaches often fail due to computational inefficiency stemming from high feature dimensionality inherent in raw opcode features. This research addresses this limitation by proposing a novel machine-learning (ML)-driven Intelligent Hybrid Feature Selection (IHFS) framework with two distinct architectures. IHFS1 combines a filter method (variance threshold) with an embedded method (LGBM feature importance). Conversely, IHFS2 integrates variance thresholding with a wrapper method (Recursive Feature Elimination with Cross-Validation using LGBM) for optimal selection. This framework is specifically designed to select an optimally stable and minimal feature subset from the initial 1183 opcode frequency vector extracted from ARM binaries. Applying this framework to a multi-family IoT malware dataset, the IHFS architectures yielded distinct and highly efficient feature subsets: IHFS1 achieved a 95.77% reduction (to 50 features), while IHFS2 attained a 98.06% reduction (to 23 features). Evaluation across eight ML models confirmed that the Random Forest (with IHFS1 subset) and Decision Tree (with IHFS2 subset) classifiers were the best performing, achieving robust classification metrics that outperform current state-of-the-art solutions. The Decision Tree model demonstrated exceptional detection capabilities, with an accuracy of 99.87%, a precision of 99.82%, a recall of 99.88%, and an F1-score of 99.85%. It achieved an average inference time of 0.058 ms per sample. Experimental results attained on a native ARM64 environment validate the deployment feasibility of the proposed system for resource-constrained IoT devices, such as the Raspberry Pi. 
The proposed system achieves a high-throughput, low-overhead security posture while maintaining host operational stability, processing a single ELF binary in just 3.431 ms.</p>
	]]></content:encoded>

	<dc:title>A Novel Hybrid Opcode Feature Selection Framework for Efficient and Effective IoT Malware Detection</dc:title>
			<dc:creator>Bakhan Tofiq Ahmed</dc:creator>
			<dc:creator>Noor Ghazi M. Jameel</dc:creator>
			<dc:creator>Bakhtiar Ibrahim Saeed</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010024</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/iot7010024</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/23">

	<title>IoT, Vol. 7, Pages 23: Edge AI for SD-IoT: A Systematic Review on Scalability and Latency</title>
	<link>https://www.mdpi.com/2624-831X/7/1/23</link>
	<description>The growing demand for IoT applications in highly dynamic environments with multiple connected devices introduces significant scalability and low-latency challenges. In the context of software-defined networking (SDN) integrated with Edge environments, adopting machine learning (ML) techniques has emerged as a promising approach to meet these requirements. This study presents a Systematic Literature Review (SLR) that identifies and analyzes ML-based solutions applied to Software-Defined Internet of Things (SD-IoT) infrastructures in Edge environments, emphasizing improving low latency and scalability. Following established methodological best practices, we conducted the review, including a clear definition of research questions, well-defined inclusion and exclusion criteria, a structured search protocol, and multiple scientific databases. Based on the analysis of selected studies, the main strategies employed to enhance network performance are categorized, along with the level of fidelity and complexity of the experimental environments used, and the realism and applicability of the proposed solutions are discussed. Furthermore, drawing from the context of the selected studies, the most recurrent ML approaches are presented&amp;mdash;including supervised, unsupervised, and reinforcement learning methods&amp;mdash;along with a discussion of their advantages and limitations in dynamic network scenarios. By compiling and organizing the contributions from the literature, this paper provides a comprehensive overview of the state of the art in applying ML to SD-IoT networks, shedding light on current trends, existing gaps, and research opportunities aimed at building more intelligent and adaptable solutions for IoT environments.</description>
	<pubDate>2026-02-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 23: Edge AI for SD-IoT: A Systematic Review on Scalability and Latency</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/23">doi: 10.3390/iot7010023</a></p>
	<p>Authors:
		Ernando P. Batista
		Alex Santos
		Maycon Peixoto
		Gustavo Figueiredo
		Cassio Prazeres
		</p>
	<p>The growing demand for IoT applications in highly dynamic environments with multiple connected devices introduces significant scalability and low-latency challenges. In the context of software-defined networking (SDN) integrated with Edge environments, adopting machine learning (ML) techniques has emerged as a promising approach to meet these requirements. This study presents a Systematic Literature Review (SLR) that identifies and analyzes ML-based solutions applied to Software-Defined Internet of Things (SD-IoT) infrastructures in Edge environments, emphasizing improving low latency and scalability. Following established methodological best practices, we conducted the review, including a clear definition of research questions, well-defined inclusion and exclusion criteria, a structured search protocol, and multiple scientific databases. Based on the analysis of selected studies, the main strategies employed to enhance network performance are categorized, along with the level of fidelity and complexity of the experimental environments used, and the realism and applicability of the proposed solutions are discussed. Furthermore, drawing from the context of the selected studies, the most recurrent ML approaches are presented&mdash;including supervised, unsupervised, and reinforcement learning methods&mdash;along with a discussion of their advantages and limitations in dynamic network scenarios. By compiling and organizing the contributions from the literature, this paper provides a comprehensive overview of the state of the art in applying ML to SD-IoT networks, shedding light on current trends, existing gaps, and research opportunities aimed at building more intelligent and adaptable solutions for IoT environments.</p>
	]]></content:encoded>

	<dc:title>Edge AI for SD-IoT: A Systematic Review on Scalability and Latency</dc:title>
			<dc:creator>Ernando P. Batista</dc:creator>
			<dc:creator>Alex Santos</dc:creator>
			<dc:creator>Maycon Peixoto</dc:creator>
			<dc:creator>Gustavo Figueiredo</dc:creator>
			<dc:creator>Cassio Prazeres</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010023</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-27</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/iot7010023</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/22">

	<title>IoT, Vol. 7, Pages 22: Ensemble Machine Learning Approach for Traffic Congestion and Travel Time Prediction in Urban Bus Rapid Transit Systems: A Case Study of Trans Metro Bandung</title>
	<link>https://www.mdpi.com/2624-831X/7/1/22</link>
	<description>Traffic congestion and travel time uncertainty remain major challenges to the operational efficiency of Bus Rapid Transit (BRT) systems in urban areas of developing countries. This study proposes an integrated solution for the Trans Metro Bandung (TMB) system by leveraging Internet of Things (IoT)&amp;ndash;based GPS data and tree-based ensemble machine learning algorithms. Spatio-temporal data collected from on-board GPS modules are processed to predict traffic congestion levels and estimate travel time across route segments. The performance of Decision Tree, Random Forest, and XGBoost models is evaluated in terms of prediction accuracy, interpretability, and computational efficiency, with particular consideration for deployment on resource-constrained hardware. Experiments conducted on 20,156 data samples show that the Decision Tree model achieves the highest congestion classification accuracy of 96.8%, while Random Forest outperforms other models in travel time regression, achieving an R2 value of 0.95 and a root mean square error (RMSE) of 5.80 min. The trained models are successfully deployed on a Raspberry Pi 3B microcontroller for real-time inference, enabling fleet management and travel planning without reliance on cloud connectivity. The results demonstrate that cost-effective and interpretable machine learning solutions can deliver reliable performance in heterogeneous urban infrastructures while providing a replicable framework for medium-sized cities seeking to implement affordable smart transportation systems.</description>
	<pubDate>2026-02-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 22: Ensemble Machine Learning Approach for Traffic Congestion and Travel Time Prediction in Urban Bus Rapid Transit Systems: A Case Study of Trans Metro Bandung</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/22">doi: 10.3390/iot7010022</a></p>
	<p>Authors:
		Rendy Munadi
		Dadan Nur Ramadan
		Sussi
		Nurwulan Fitriyanti
		Hilal H. Nuha
		</p>
	<p>Traffic congestion and travel time uncertainty remain major challenges to the operational efficiency of Bus Rapid Transit (BRT) systems in urban areas of developing countries. This study proposes an integrated solution for the Trans Metro Bandung (TMB) system by leveraging Internet of Things (IoT)&ndash;based GPS data and tree-based ensemble machine learning algorithms. Spatio-temporal data collected from on-board GPS modules are processed to predict traffic congestion levels and estimate travel time across route segments. The performance of Decision Tree, Random Forest, and XGBoost models is evaluated in terms of prediction accuracy, interpretability, and computational efficiency, with particular consideration for deployment on resource-constrained hardware. Experiments conducted on 20,156 data samples show that the Decision Tree model achieves the highest congestion classification accuracy of 96.8%, while Random Forest outperforms other models in travel time regression, achieving an R2 value of 0.95 and a root mean square error (RMSE) of 5.80 min. The trained models are successfully deployed on a Raspberry Pi 3B microcontroller for real-time inference, enabling fleet management and travel planning without reliance on cloud connectivity. The results demonstrate that cost-effective and interpretable machine learning solutions can deliver reliable performance in heterogeneous urban infrastructures while providing a replicable framework for medium-sized cities seeking to implement affordable smart transportation systems.</p>
	]]></content:encoded>

	<dc:title>Ensemble Machine Learning Approach for Traffic Congestion and Travel Time Prediction in Urban Bus Rapid Transit Systems: A Case Study of Trans Metro Bandung</dc:title>
			<dc:creator>Rendy Munadi</dc:creator>
			<dc:creator>Dadan Nur Ramadan</dc:creator>
			<dc:creator>Sussi</dc:creator>
			<dc:creator>Nurwulan Fitriyanti</dc:creator>
			<dc:creator>Hilal H. Nuha</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010022</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-27</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/iot7010022</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/21">

	<title>IoT, Vol. 7, Pages 21: Performance Trade-Offs in Multi-Tenant IoT&amp;ndash;Cloud Security: A Systematic Review of Emerging Technologies</title>
	<link>https://www.mdpi.com/2624-831X/7/1/21</link>
	<description>Multi-tenancy is essential for scalable IoT&amp;ndash;Cloud systems; however, it introduces complex security vulnerabilities at the intersection of shared cloud infrastructures and resource-constrained IoT environments. This systematic review evaluates next-generation security frameworks designed to enforce tenant isolation without violating the strict latency (&amp;lt;10 ms) and energy bounds of lightweight sensors. Adhering to PRISMA guidelines, we analyze selected high-quality studies to categorize intersectional threats, including cross-tenant data leakage, side-channel attacks, and privilege escalation. Our analysis identifies a critical, unresolved conflict: existing mitigation strategies often incur a 12% computational and communication overhead, creating a significant barrier for real-time applications. Furthermore, we critically analyze emerging technologies, including Zero Trust Architectures (ZTA), adaptive Artificial Intelligence (AI), blockchain, and Post-Quantum Cryptography (PQC). We find that direct PQC deployment is currently infeasible for LPWAN protocols due to key-size constraints (1.6 KB) that exceed typical payload limits. To address these challenges, we propose a novel multi-layer security design principle that offloads heavy isolation and cryptographic workloads to hardware-accelerated edge gateways, thereby maintaining tenant isolation without compromising real-time performance. Finally, this review serves as a roadmap for future research, highlighting federated learning and hardware enclaves as essential pathways for securing next-generation multi-tenant IoT ecosystems.</description>
	<pubDate>2026-02-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 21: Performance Trade-Offs in Multi-Tenant IoT&ndash;Cloud Security: A Systematic Review of Emerging Technologies</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/21">doi: 10.3390/iot7010021</a></p>
	<p>Authors:
		Bader Alobaywi
		Mohammed G. Almutairi
		Frederick T. Sheldon
		</p>
	<p>Multi-tenancy is essential for scalable IoT&ndash;Cloud systems; however, it introduces complex security vulnerabilities at the intersection of shared cloud infrastructures and resource-constrained IoT environments. This systematic review evaluates next-generation security frameworks designed to enforce tenant isolation without violating the strict latency (&lt;10 ms) and energy bounds of lightweight sensors. Adhering to PRISMA guidelines, we analyze selected high-quality studies to categorize intersectional threats, including cross-tenant data leakage, side-channel attacks, and privilege escalation. Our analysis identifies a critical, unresolved conflict: existing mitigation strategies often incur a 12% computational and communication overhead, creating a significant barrier for real-time applications. Furthermore, we critically analyze emerging technologies, including Zero Trust Architectures (ZTA), adaptive Artificial Intelligence (AI), blockchain, and Post-Quantum Cryptography (PQC). We find that direct PQC deployment is currently infeasible for LPWAN protocols due to key-size constraints (1.6 KB) that exceed typical payload limits. To address these challenges, we propose a novel multi-layer security design principle that offloads heavy isolation and cryptographic workloads to hardware-accelerated edge gateways, thereby maintaining tenant isolation without compromising real-time performance. Finally, this review serves as a roadmap for future research, highlighting federated learning and hardware enclaves as essential pathways for securing next-generation multi-tenant IoT ecosystems.</p>
	]]></content:encoded>

	<dc:title>Performance Trade-Offs in Multi-Tenant IoT&amp;ndash;Cloud Security: A Systematic Review of Emerging Technologies</dc:title>
			<dc:creator>Bader Alobaywi</dc:creator>
			<dc:creator>Mohammed G. Almutairi</dc:creator>
			<dc:creator>Frederick T. Sheldon</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010021</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-22</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/iot7010021</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/20">

	<title>IoT, Vol. 7, Pages 20: A Layered Architecture for Concurrent CSI-Based Applications in Smart Environments</title>
	<link>https://www.mdpi.com/2624-831X/7/1/20</link>
	<description>The prevalence of radio frequency signals in indoor environments has in recent years given rise to new technologies across many domains such as robotics, healthcare, and surveillance. Radio frequency signals propagate in the wireless medium through multiple paths and carry useful environment-dependent information. Capturing and analyzing these signal patterns can offer new solutions for a number of applications relevant to ranging, tracking, perception and recognition. In this work we propose a novel architecture, separating physical, back-bone networks, and inference layers, towards fully ubiquitous passive recognition systems that scale with the number of environments and applications. We propose a back-bone architecture that utilizes a novel Cross Dual-Path Attention (CDPA) block to capture spatial and temporal correlations from Channel State Information (CSI) for device-free, multi-task applications. Subsequently, a distill and transfer algorithm is proposed to generalize the inference capabilities of CDPA over multiple target environments for scalable training and reduced computational costs. By sharing knowledge between models across a shared network, experimentation shows that edge devices can be deployed with improved performance while simultaneously meeting strict computation and memory requirements. Our distributed learning paradigm demonstrates that CDPA-based models are capable of using passive signals in a non-intrusive and privacy-protecting manner, in order to achieve ubiquitous recognition at scale in smart environments.</description>
	<pubDate>2026-02-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 20: A Layered Architecture for Concurrent CSI-Based Applications in Smart Environments</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/20">doi: 10.3390/iot7010020</a></p>
	<p>Authors:
		Shervin Mehryar
		</p>
	<p>The prevalence of radio frequency signals in indoor environments has in recent years given rise to new technologies across many domains such as robotics, healthcare, and surveillance. Radio frequency signals propagate in the wireless medium through multiple paths and carry useful environment-dependent information. Capturing and analyzing these signal patterns can offer new solutions for a number of applications relevant to ranging, tracking, perception and recognition. In this work we propose a novel architecture, separating physical, back-bone networks, and inference layers, towards fully ubiquitous passive recognition systems that scale with the number of environments and applications. We propose a back-bone architecture that utilizes a novel Cross Dual-Path Attention (CDPA) block to capture spatial and temporal correlations from Channel State Information (CSI) for device-free, multi-task applications. Subsequently, a distill and transfer algorithm is proposed to generalize the inference capabilities of CDPA over multiple target environments for scalable training and reduced computational costs. By sharing knowledge between models across a shared network, experimentation shows that edge devices can be deployed with improved performance while simultaneously meeting strict computation and memory requirements. Our distributed learning paradigm demonstrates that CDPA-based models are capable of using passive signals in a non-intrusive and privacy-protecting manner, in order to achieve ubiquitous recognition at scale in smart environments.</p>
	]]></content:encoded>

	<dc:title>A Layered Architecture for Concurrent CSI-Based Applications in Smart Environments</dc:title>
			<dc:creator>Shervin Mehryar</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010020</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-17</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-17</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/iot7010020</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/19">

	<title>IoT, Vol. 7, Pages 19: Research Communities in Smart Homes Security: A Systematic Mapping Study</title>
	<link>https://www.mdpi.com/2624-831X/7/1/19</link>
	<description>Smart homes are becoming increasingly common, bringing convenience to users but also raising serious security concerns. As the number of connected devices grows, so does the research interest in securing smart homes. However, the literature is broad, making it difficult to understand the main research directions and how they are connected. Given the scope and diversity of existing research, a systematic mapping study was chosen to provide a high-level overview of smart home security research by mapping research communities, identifying dominant themes, and examining their evolution over time. We retrieved articles from the Scopus database published between 2000 and April 2025, resulting in approximately 13,600 articles. After filtering out unrelated domains such as smart vehicles, smart industry, and general IoT, a final set of 6313 publications specifically focused on smart home security was used for analysis. We applied a citation-based network analysis approach, constructed an author citation graph, and used the Louvain community detection algorithm to identify 12 main research communities. Each community was further analyzed based on its keywords, most-cited publications, leading authors, and institutions. Our results provide a structured overview of the field, highlighting its key themes and evolution over time. This work can help researchers better navigate the smart home security landscape and identify future research opportunities.</description>
	<pubDate>2026-02-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 19: Research Communities in Smart Homes Security: A Systematic Mapping Study</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/19">doi: 10.3390/iot7010019</a></p>
	<p>Authors:
		Fazeleh Dehghani Ashkezari
		Andreas Jacobsson
		Kayode S. Adewole
		Klara Svalin
		Martin Höst
		</p>
	<p>Smart homes are becoming increasingly common, bringing convenience to users but also raising serious security concerns. As the number of connected devices grows, so does the research interest in securing smart homes. However, the literature is broad, making it difficult to understand the main research directions and how they are connected. Given the scope and diversity of existing research, a systematic mapping study was chosen to provide a high-level overview of smart home security research by mapping research communities, identifying dominant themes, and examining their evolution over time. We retrieved articles from the Scopus database published between 2000 and April 2025, resulting in approximately 13,600 articles. After filtering out unrelated domains such as smart vehicles, smart industry, and general IoT, a final set of 6313 publications specifically focused on smart home security was used for analysis. We applied a citation-based network analysis approach, constructed an author citation graph, and used the Louvain community detection algorithm to identify 12 main research communities. Each community was further analyzed based on its keywords, most-cited publications, leading authors, and institutions. Our results provide a structured overview of the field, highlighting its key themes and evolution over time. This work can help researchers better navigate the smart home security landscape and identify future research opportunities.</p>
	]]></content:encoded>

	<dc:title>Research Communities in Smart Homes Security: A Systematic Mapping Study</dc:title>
			<dc:creator>Fazeleh Dehghani Ashkezari</dc:creator>
			<dc:creator>Andreas Jacobsson</dc:creator>
			<dc:creator>Kayode S. Adewole</dc:creator>
			<dc:creator>Klara Svalin</dc:creator>
			<dc:creator>Martin Höst</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010019</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-11</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-11</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/iot7010019</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/18">

	<title>IoT, Vol. 7, Pages 18: Lightweight Protection Mechanisms for IoT Networks Based on Trust Modelling</title>
	<link>https://www.mdpi.com/2624-831X/7/1/18</link>
	<description>Since the deployment of the Internet of Things (IoT), it has transformed everyday life by enabling intelligent environments that improve efficiency and automate services in domains such as agriculture, healthcare, smart cities, and industry. However, the rapid proliferation of IoT devices has introduced significant security challenges, largely driven by the heterogeneity of devices, resource constraints, and the increasing exposure of network communications. This work proposes a lightweight security protection mechanism for IoT networks based on trust modelling. The proposed approach integrates machine learning techniques to evaluate IoT node behavior using network-layer (Layer 3) traffic features under different labeling granularities, including binary, categorical, and subcategorical classifications. By focusing on network-layer observations, the model remains applicable across heterogeneous IoT devices while preserving a low computational footprint. In addition, the Common Vulnerability Scoring System (CVSS) is incorporated as a standardized vulnerability severity metric, enabling the integration of probabilistic security evidence with contextual information about potential impact. This combination allows the estimation of trust to reflect not only the likelihood of anomalous behavior but also its associated severity. Experimental evaluation was conducted using a representative IoT traffic dataset, multiple preprocessing strategies, and several classical machine learning models. The results demonstrate that aggregating traffic-based intrusion detection outputs with vulnerability severity metrics enables a more robust, flexible, and interpretable trust estimation process. This approach supports the early identification of potentially compromised nodes while maintaining scalability and efficiency, making it suitable for deployment in heterogeneous IoT environments.</description>
	<pubDate>2026-02-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 18: Lightweight Protection Mechanisms for IoT Networks Based on Trust Modelling</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/18">doi: 10.3390/iot7010018</a></p>
	<p>Authors:
		Andric Rodríguez
		Asdrúbal López-Chau
		Leticia Dávila-Nicanor
		Víctor Landassuri-Moreno
		Saul Lazcano-Salas
		</p>
	<p>Since the deployment of the Internet of Things (IoT), it has transformed everyday life by enabling intelligent environments that improve efficiency and automate services in domains such as agriculture, healthcare, smart cities, and industry. However, the rapid proliferation of IoT devices has introduced significant security challenges, largely driven by the heterogeneity of devices, resource constraints, and the increasing exposure of network communications. This work proposes a lightweight security protection mechanism for IoT networks based on trust modelling. The proposed approach integrates machine learning techniques to evaluate IoT node behavior using network-layer (Layer 3) traffic features under different labeling granularities, including binary, categorical, and subcategorical classifications. By focusing on network-layer observations, the model remains applicable across heterogeneous IoT devices while preserving a low computational footprint. In addition, the Common Vulnerability Scoring System (CVSS) is incorporated as a standardized vulnerability severity metric, enabling the integration of probabilistic security evidence with contextual information about potential impact. This combination allows the estimation of trust to reflect not only the likelihood of anomalous behavior but also its associated severity. Experimental evaluation was conducted using a representative IoT traffic dataset, multiple preprocessing strategies, and several classical machine learning models. The results demonstrate that aggregating traffic-based intrusion detection outputs with vulnerability severity metrics enables a more robust, flexible, and interpretable trust estimation process. This approach supports the early identification of potentially compromised nodes while maintaining scalability and efficiency, making it suitable for deployment in heterogeneous IoT environments.</p>
	]]></content:encoded>

	<dc:title>Lightweight Protection Mechanisms for IoT Networks Based on Trust Modelling</dc:title>
			<dc:creator>Andric Rodríguez</dc:creator>
			<dc:creator>Asdrúbal López-Chau</dc:creator>
			<dc:creator>Leticia Dávila-Nicanor</dc:creator>
			<dc:creator>Víctor Landassuri-Moreno</dc:creator>
			<dc:creator>Saul Lazcano-Salas</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010018</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-10</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-10</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/iot7010018</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/17">

	<title>IoT, Vol. 7, Pages 17: Methodology and Architecture for Benchmarking End-to-End PQC Protocol Resilience in an IoT Context</title>
	<link>https://www.mdpi.com/2624-831X/7/1/17</link>
	<description>Migrating to Post-Quantum Cryptography (PQC) is critical for securing resource-constrained Internet of Things (IoT) devices against the “harvest-now, decrypt-later” threat. While ML-KEM (CRYSTALS-Kyber) has been standardized under FIPS 203 for general encryption, these devices often operate on unreliable networks suffering from high latency and packet loss. Our recent systematic review identified a critical gap that existing research overwhelmingly focuses on Transport Layer Security (TLS). This leaves the resilience of lightweight protocols like MQTT and CoAP under challenging network conditions largely unexplored. This paper introduces PQC-IoTNet, a novel Software-in-the-Loop (SITL) framework to address this gap. Our three-tier architecture integrates a Python-based IoT client with kernel-level emulation to test the full protocol stack. Validation results comparing Kyber and ECC demonstrate the framework’s ability to capture critical performance cliffs caused by TCP retransmissions. Notably, the framework revealed that while Kyber maintained an 18% speed advantage over ECC at 5% packet loss, both protocols experienced nonlinear latency spikes. This work provides a reproducible blueprint to identify operational boundaries and select resilient protocols for secure IoT systems.</description>
	<pubDate>2026-02-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 17: Methodology and Architecture for Benchmarking End-to-End PQC Protocol Resilience in an IoT Context</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/17">doi: 10.3390/iot7010017</a></p>
	<p>Authors:
		Mohammed G. Almutairi
		Frederick T. Sheldon
		</p>
	<p>Migrating to Post-Quantum Cryptography (PQC) is critical for securing resource-constrained Internet of Things (IoT) devices against the “harvest-now, decrypt-later” threat. While ML-KEM (CRYSTALS-Kyber) has been standardized under FIPS 203 for general encryption, these devices often operate on unreliable networks suffering from high latency and packet loss. Our recent systematic review identified a critical gap that existing research overwhelmingly focuses on Transport Layer Security (TLS). This leaves the resilience of lightweight protocols like MQTT and CoAP under challenging network conditions largely unexplored. This paper introduces PQC-IoTNet, a novel Software-in-the-Loop (SITL) framework to address this gap. Our three-tier architecture integrates a Python-based IoT client with kernel-level emulation to test the full protocol stack. Validation results comparing Kyber and ECC demonstrate the framework’s ability to capture critical performance cliffs caused by TCP retransmissions. Notably, the framework revealed that while Kyber maintained an 18% speed advantage over ECC at 5% packet loss, both protocols experienced nonlinear latency spikes. This work provides a reproducible blueprint to identify operational boundaries and select resilient protocols for secure IoT systems.</p>
	]]></content:encoded>

	<dc:title>Methodology and Architecture for Benchmarking End-to-End PQC Protocol Resilience in an IoT Context</dc:title>
			<dc:creator>Mohammed G. Almutairi</dc:creator>
			<dc:creator>Frederick T. Sheldon</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010017</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-10</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-10</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/iot7010017</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/16">

	<title>IoT, Vol. 7, Pages 16: Intrusion Detection on the Internet of Things: A Comprehensive Review and Gap Analysis Toward Real-Time, Lightweight, Adaptive, and Autonomous Security</title>
	<link>https://www.mdpi.com/2624-831X/7/1/16</link>
	<description>The rapid growth of the Internet of Things (IoT) has exposed billions of interconnected, heterogeneous, and resource-constrained devices to increasingly sophisticated threats. To evaluate the readiness of current intrusion detection systems (IDSs), this study reviews 32 recent IoT-IDS proposals spanning conventional, machine-learning, deep-learning, and hybrid approaches. Each system is assessed against 10 criteria that reflect practical IoT requirements, including real-time performance, latency, lightweight design, detection accuracy, mitigation capabilities, integrated detection-and-mitigation workflows, adaptability, resilience to advanced attacks, validation in realistic environments, and scalability. The results indicate that although many approaches achieve high detection accuracy, most do not meet real-time and lightweight thresholds commonly cited in IoT deployment literature. Mitigation features are often absent, adaptability is rarely implemented, and 29 out of 32 studies rely solely on offline datasets, thereby limiting confidence in their robustness to deployment. Scalability remains the most significant limitation, as none of the reviewed IDSs have tested their performance under realistic multi-node or high-traffic conditions, even though scalability is critical for large IoT ecosystems. Overall, the review suggests that future IoT IDS research should move beyond accuracy-focused models and toward lightweight, adaptive, and autonomous solutions that incorporate mitigation, support real-time inference, and undergo standardized evaluations under real-world operating conditions.</description>
	<pubDate>2026-02-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 16: Intrusion Detection on the Internet of Things: A Comprehensive Review and Gap Analysis Toward Real-Time, Lightweight, Adaptive, and Autonomous Security</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/16">doi: 10.3390/iot7010016</a></p>
	<p>Authors:
		Suzan Sallam
		May El Barachi
		Nan Li
		</p>
	<p>The rapid growth of the Internet of Things (IoT) has exposed billions of interconnected, heterogeneous, and resource-constrained devices to increasingly sophisticated threats. To evaluate the readiness of current intrusion detection systems (IDSs), this study reviews 32 recent IoT-IDS proposals spanning conventional, machine-learning, deep-learning, and hybrid approaches. Each system is assessed against 10 criteria that reflect practical IoT requirements, including real-time performance, latency, lightweight design, detection accuracy, mitigation capabilities, integrated detection-and-mitigation workflows, adaptability, resilience to advanced attacks, validation in realistic environments, and scalability. The results indicate that although many approaches achieve high detection accuracy, most do not meet real-time and lightweight thresholds commonly cited in IoT deployment literature. Mitigation features are often absent, adaptability is rarely implemented, and 29 out of 32 studies rely solely on offline datasets, thereby limiting confidence in their robustness to deployment. Scalability remains the most significant limitation, as none of the reviewed IDSs have tested their performance under realistic multi-node or high-traffic conditions, even though scalability is critical for large IoT ecosystems. Overall, the review suggests that future IoT IDS research should move beyond accuracy-focused models and toward lightweight, adaptive, and autonomous solutions that incorporate mitigation, support real-time inference, and undergo standardized evaluations under real-world operating conditions.</p>
	]]></content:encoded>

	<dc:title>Intrusion Detection on the Internet of Things: A Comprehensive Review and Gap Analysis Toward Real-Time, Lightweight, Adaptive, and Autonomous Security</dc:title>
			<dc:creator>Suzan Sallam</dc:creator>
			<dc:creator>May El Barachi</dc:creator>
			<dc:creator>Nan Li</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010016</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-02-07</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-02-07</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/iot7010016</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/15">

	<title>IoT, Vol. 7, Pages 15: Audiovisual Gun Detection with Automated Lockdown and PA Announcing IoT System for Schools</title>
	<link>https://www.mdpi.com/2624-831X/7/1/15</link>
	<description>Gun violence in U.S. schools not only causes loss of life and physical injury but also leaves enduring psychological trauma, damages property, and results in significant economic losses. One way to reduce this loss is to detect the gun early, notify the police as soon as possible, and implement lockdown procedures immediately. In this project, a novel gun detector Internet of Things (IoT) system is developed that automatically detects the presence of a gun either from images or from gunshot sounds, and sends notifications with exact location information to the first responder’s smartphones using the Internet within a second. The device also sends wireless commands using Message Queuing Telemetry Transport (MQTT) protocol to close the smart door locks in classrooms and announce to act using public address (PA) system automatically. The proposed system will remove the burden of manually calling the police and implementing the lockdown procedure during such traumatic situations. Police will arrive sooner, and thus it will help to stop the shooter early, the injured people can be taken to the hospital quickly, and more lives can be saved. Two custom deep learning AI models are used: (a) to detect guns from image data having an accuracy of 94.6%, and (b) the gunshot sounds from audio data having an accuracy of 99%. No single gun detector device is available in the literature that can detect guns from both image and audio data, implement lockdown and make PA announcement automatically. A prototype of the proposed gunshot detector IoT system, and a smartphone app is developed, and tested with gun replicas and blank guns in real-time.</description>
	<pubDate>2026-01-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 15: Audiovisual Gun Detection with Automated Lockdown and PA Announcing IoT System for Schools</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/15">doi: 10.3390/iot7010015</a></p>
	<p>Authors:
		Tareq Khan
		</p>
	<p>Gun violence in U.S. schools not only causes loss of life and physical injury but also leaves enduring psychological trauma, damages property, and results in significant economic losses. One way to reduce this loss is to detect the gun early, notify the police as soon as possible, and implement lockdown procedures immediately. In this project, a novel gun detector Internet of Things (IoT) system is developed that automatically detects the presence of a gun either from images or from gunshot sounds, and sends notifications with exact location information to the first responder’s smartphones using the Internet within a second. The device also sends wireless commands using Message Queuing Telemetry Transport (MQTT) protocol to close the smart door locks in classrooms and announce to act using public address (PA) system automatically. The proposed system will remove the burden of manually calling the police and implementing the lockdown procedure during such traumatic situations. Police will arrive sooner, and thus it will help to stop the shooter early, the injured people can be taken to the hospital quickly, and more lives can be saved. Two custom deep learning AI models are used: (a) to detect guns from image data having an accuracy of 94.6%, and (b) the gunshot sounds from audio data having an accuracy of 99%. No single gun detector device is available in the literature that can detect guns from both image and audio data, implement lockdown and make PA announcement automatically. A prototype of the proposed gunshot detector IoT system, and a smartphone app is developed, and tested with gun replicas and blank guns in real-time.</p>
	]]></content:encoded>

	<dc:title>Audiovisual Gun Detection with Automated Lockdown and PA Announcing IoT System for Schools</dc:title>
			<dc:creator>Tareq Khan</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010015</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-31</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/iot7010015</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/14">

	<title>IoT, Vol. 7, Pages 14: The Network and Information Systems 2 Directive: Toward Scalable Cyber Risk Management in the Remote Patient Monitoring Domain: A Systematic Review</title>
	<link>https://www.mdpi.com/2624-831X/7/1/14</link>
	<description>Healthcare 5.0 and the Internet of Medical Things (IoMT) is emerging as a scalable model for the delivery of customised healthcare and chronic disease management, through Remote Patient Monitoring (RPM) in patient smart home environments. Large-scale RPM initiatives are being rolled out by healthcare providers (HCPs); however, the constrained nature of IoMT devices and proximity to poorly administered smart home technologies create a cyber risk for highly personalised patient data. The recent Network and Information Systems (NIS 2) directive requires HCPs to improve their cyber risk management approaches, mandating heavy penalties for non-compliance. Current research into cyber risk management in smart home-based RPM does not address scalability. This research examines scalability through the lens of the Non-adoption, Abandonment, Scale-up, Spread and Sustainability (NASSS) framework and develops a novel Scalability Index (SI), informed by a PRISMA guided systematic literature review. Our search strategy identified 57 studies across major databases including ACM, IEEE, MDPI, Elsevier, and Springer, authored between January 2016 and March 2025 (final search 21 March 2025), which focussed on cyber security risk management in the RPM context. Studies focussing solely on healthcare institutional settings were excluded. To mitigate bias, a sample of the papers (30/57) were assessed by two other raters; the resulting Cohen’s Kappa inter-rater agreement statistic (0.8) indicating strong agreement on study selection. The results, presented in graphical and tabular format, provide evidence that most cyber risk approaches do not consider scalability from the HCP perspective. Applying the SI to the 57 studies in our review resulted in a low to medium scalability potential of most cyber risk management proposals, indicating that they would not support the requirements of NIS 2 in the RPM context. 
A limitation of our work is that it was not tested in a live large-scale setting. However, future research could validate the proposed SI, providing guidance for researchers and practitioners in enhancing cyber risk management of large-scale RPM initiatives.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 14: The Network and Information Systems 2 Directive: Toward Scalable Cyber Risk Management in the Remote Patient Monitoring Domain: A Systematic Review</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/14">doi: 10.3390/iot7010014</a></p>
	<p>Authors:
		Brian Mulhern
		Chitra Balakrishna
		Jan Collie
		</p>
	<p>Healthcare 5.0 and the Internet of Medical Things (IoMT) is emerging as a scalable model for the delivery of customised healthcare and chronic disease management, through Remote Patient Monitoring (RPM) in patient smart home environments. Large-scale RPM initiatives are being rolled out by healthcare providers (HCPs); however, the constrained nature of IoMT devices and proximity to poorly administered smart home technologies create a cyber risk for highly personalised patient data. The recent Network and Information Systems (NIS 2) directive requires HCPs to improve their cyber risk management approaches, mandating heavy penalties for non-compliance. Current research into cyber risk management in smart home-based RPM does not address scalability. This research examines scalability through the lens of the Non-adoption, Abandonment, Scale-up, Spread and Sustainability (NASSS) framework and develops a novel Scalability Index (SI), informed by a PRISMA guided systematic literature review. Our search strategy identified 57 studies across major databases including ACM, IEEE, MDPI, Elsevier, and Springer, authored between January 2016 and March 2025 (final search 21 March 2025), which focussed on cyber security risk management in the RPM context. Studies focussing solely on healthcare institutional settings were excluded. To mitigate bias, a sample of the papers (30/57) were assessed by two other raters; the resulting Cohen’s Kappa inter-rater agreement statistic (0.8) indicating strong agreement on study selection. The results, presented in graphical and tabular format, provide evidence that most cyber risk approaches do not consider scalability from the HCP perspective. Applying the SI to the 57 studies in our review resulted in a low to medium scalability potential of most cyber risk management proposals, indicating that they would not support the requirements of NIS 2 in the RPM context. 
A limitation of our work is that it was not tested in a live large-scale setting. However, future research could validate the proposed SI, providing guidance for researchers and practitioners in enhancing cyber risk management of large-scale RPM initiatives.</p>
	]]></content:encoded>

	<dc:title>The Network and Information Systems 2 Directive: Toward Scalable Cyber Risk Management in the Remote Patient Monitoring Domain: A Systematic Review</dc:title>
			<dc:creator>Brian Mulhern</dc:creator>
			<dc:creator>Chitra Balakrishna</dc:creator>
			<dc:creator>Jan Collie</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010014</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/iot7010014</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/13">

	<title>IoT, Vol. 7, Pages 13: G-PFL-ID: Graph-Driven Personalized Federated Learning for Unsupervised Intrusion Detection in Non-IID IoT Systems</title>
	<link>https://www.mdpi.com/2624-831X/7/1/13</link>
	<description>Intrusion detection in IoT networks is challenged by data heterogeneity, label scarcity, and privacy constraints. Traditional federated learning (FL) methods often assume IID data or require supervised labels, limiting their practicality. We propose G-PFL-ID, a graph-driven personalized federated learning framework for unsupervised intrusion detection in non-IID IoT systems. Our method trains a global graph encoder (GCN or GAE) with a DeepSVDD objective under a federated regularizer (FedReg) that combines proximal and variance penalties, then personalizes local models via a lightweight fine-tuning head. We evaluate G-PFL-ID on the IoT-23 (Mirai-based captures) and N-BaIoT (device-level dataset) under realistic heterogeneity (Dirichlet-based partitioning with concentration parameters α∈{0.1,0.5,∞} and client counts K∈{10,15,20} for IoT-23, and natural device-based partitioning for N-BaIoT). G-PFL-ID outperforms global FL baselines and recent graph-based federated anomaly detectors, achieving up to 99.46% AUROC on IoT-23 and 97.74% AUROC on N-BaIoT. Ablation studies confirm that the proximal and variance penalties reduce inter-round drift and representation collapse, and that lightweight personalization recovers local sensitivity—especially for clients with limited data. Our work bridges graph-based anomaly detection with personalized FL for scalable, privacy-preserving IoT security.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 13: G-PFL-ID: Graph-Driven Personalized Federated Learning for Unsupervised Intrusion Detection in Non-IID IoT Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/13">doi: 10.3390/iot7010013</a></p>
	<p>Authors:
		Daniel Ayo Oladele
		Ayokunle Ige
		Olatunbosun Agbo-Ajala
		Olufisayo Ekundayo
		Sree Ganesh Thottempudi
		Malusi Sibiya
		Ernest Mnkandla
		</p>
	<p>Intrusion detection in IoT networks is challenged by data heterogeneity, label scarcity, and privacy constraints. Traditional federated learning (FL) methods often assume IID data or require supervised labels, limiting their practicality. We propose G-PFL-ID, a graph-driven personalized federated learning framework for unsupervised intrusion detection in non-IID IoT systems. Our method trains a global graph encoder (GCN or GAE) with a DeepSVDD objective under a federated regularizer (FedReg) that combines proximal and variance penalties, then personalizes local models via a lightweight fine-tuning head. We evaluate G-PFL-ID on the IoT-23 (Mirai-based captures) and N-BaIoT (device-level dataset) under realistic heterogeneity (Dirichlet-based partitioning with concentration parameters α∈{0.1,0.5,∞} and client counts K∈{10,15,20} for IoT-23, and natural device-based partitioning for N-BaIoT). G-PFL-ID outperforms global FL baselines and recent graph-based federated anomaly detectors, achieving up to 99.46% AUROC on IoT-23 and 97.74% AUROC on N-BaIoT. Ablation studies confirm that the proximal and variance penalties reduce inter-round drift and representation collapse, and that lightweight personalization recovers local sensitivity—especially for clients with limited data. Our work bridges graph-based anomaly detection with personalized FL for scalable, privacy-preserving IoT security.</p>
	]]></content:encoded>

	<dc:title>G-PFL-ID: Graph-Driven Personalized Federated Learning for Unsupervised Intrusion Detection in Non-IID IoT Systems</dc:title>
			<dc:creator>Daniel Ayo Oladele</dc:creator>
			<dc:creator>Ayokunle Ige</dc:creator>
			<dc:creator>Olatunbosun Agbo-Ajala</dc:creator>
			<dc:creator>Olufisayo Ekundayo</dc:creator>
			<dc:creator>Sree Ganesh Thottempudi</dc:creator>
			<dc:creator>Malusi Sibiya</dc:creator>
			<dc:creator>Ernest Mnkandla</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010013</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/iot7010013</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/12">

	<title>IoT, Vol. 7, Pages 12: H-CLAS: A Hybrid Continual Learning Framework for Adaptive Fault Detection and Self-Healing in IoT-Enabled Smart Grids</title>
	<link>https://www.mdpi.com/2624-831X/7/1/12</link>
	<description>The rapid expansion of Internet of Things (IoT)-enabled smart grids has intensified the need for reliable fault detection and autonomous self-healing under non-stationary operating conditions characterized by frequent concept drift. To address the limitations of static and single-strategy adaptive models, this paper proposes H-CLAS, a novel Hybrid Continual Learning for Adaptive Self-healing framework that unifies regularization-based, memory-based, architectural, and meta-learning strategies within a single adaptive pipeline. The framework integrates convolutional neural networks (CNNs) for fault detection, graph neural networks for topology-aware fault localization, reinforcement learning for self-healing control, and a hybrid drift detection mechanism combining ADWIN and Page–Hinkley tests. Continual adaptation is achieved through the synergistic use of Elastic Weight Consolidation, memory-augmented replay, progressive neural network expansion, and Model-Agnostic Meta-Learning for rapid adaptation to emerging drifts. Extensive experiments conducted on the Smart City Air Quality and Network Intrusion Detection Dataset (NSL-KDD) demonstrate that H-CLAS achieves accuracy improvements of 12–15% over baseline methods, reduces false positives by over 50%, and enables 2–3× faster recovery after drift events. By enhancing resilience, reliability, and autonomy in critical IoT-driven infrastructures, the proposed framework contributes to improved grid stability, reduced downtime, and safer, more sustainable energy and urban monitoring systems, thereby providing significant societal and environmental benefits.</description>
	<pubDate>2026-01-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 12: H-CLAS: A Hybrid Continual Learning Framework for Adaptive Fault Detection and Self-Healing in IoT-Enabled Smart Grids</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/12">doi: 10.3390/iot7010012</a></p>
	<p>Authors:
		Tina Babu
		Rekha R. Nair
		Balamurugan Balusamy
		Sumendra Yogarayan
		</p>
	<p>The rapid expansion of Internet of Things (IoT)-enabled smart grids has intensified the need for reliable fault detection and autonomous self-healing under non-stationary operating conditions characterized by frequent concept drift. To address the limitations of static and single-strategy adaptive models, this paper proposes H-CLAS, a novel Hybrid Continual Learning for Adaptive Self-healing framework that unifies regularization-based, memory-based, architectural, and meta-learning strategies within a single adaptive pipeline. The framework integrates convolutional neural networks (CNNs) for fault detection, graph neural networks for topology-aware fault localization, reinforcement learning for self-healing control, and a hybrid drift detection mechanism combining ADWIN and Page–Hinkley tests. Continual adaptation is achieved through the synergistic use of Elastic Weight Consolidation, memory-augmented replay, progressive neural network expansion, and Model-Agnostic Meta-Learning for rapid adaptation to emerging drifts. Extensive experiments conducted on the Smart City Air Quality and Network Intrusion Detection Dataset (NSL-KDD) demonstrate that H-CLAS achieves accuracy improvements of 12–15% over baseline methods, reduces false positives by over 50%, and enables 2–3× faster recovery after drift events. By enhancing resilience, reliability, and autonomy in critical IoT-driven infrastructures, the proposed framework contributes to improved grid stability, reduced downtime, and safer, more sustainable energy and urban monitoring systems, thereby providing significant societal and environmental benefits.</p>
	]]></content:encoded>

	<dc:title>H-CLAS: A Hybrid Continual Learning Framework for Adaptive Fault Detection and Self-Healing in IoT-Enabled Smart Grids</dc:title>
			<dc:creator>Tina Babu</dc:creator>
			<dc:creator>Rekha R. Nair</dc:creator>
			<dc:creator>Balamurugan Balusamy</dc:creator>
			<dc:creator>Sumendra Yogarayan</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010012</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-27</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/iot7010012</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/11">

	<title>IoT, Vol. 7, Pages 11: Data Fusion Method for Multi-Sensor Internet of Things Systems Including Data Imputation</title>
	<link>https://www.mdpi.com/2624-831X/7/1/11</link>
	<description>In Internet of Things (IoT) systems, data collected by geographically distributed sensors is often incomplete due to device failures, harsh deployment conditions, energy constraints, and unreliable communication. Such data gaps can significantly degrade downstream data processing and decision-making, particularly when failures result in the loss of all locally redundant sensors. Conventional imputation approaches typically rely on historical trends or multi-sensor fusion within the same target environment; however, historical methods struggle to capture emerging patterns, while same-location fusion remains vulnerable to single-point failures when local redundancy is unavailable. This article proposes a correlation-aware, cross-location data fusion framework for data imputation in IoT networks that explicitly addresses single-point failure scenarios. Instead of relying on co-located sensors, the framework selectively fuses semantically similar features from independent and geographically distributed gateways using summary statistics-based and correlation screening to minimize communication overhead. The resulting fused dataset is then processed using a lightweight KNN with an Iterative PCA imputation method, which combines local neighborhood similarity with global covariance structure to generate synthetic data for missing values. The proposed framework is evaluated using real-world weather station data collected from eight geographically diverse locations across the United States. The experimental results show that the proposed approach achieves improved or comparable imputation accuracy relative to conventional same-location fusion methods when sufficient cross-location feature correlation exists and degrades gracefully when correlation is weak. By enabling data recovery without requiring redundant local sensors, the proposed approach provides a resource-efficient and failure-resilient solution for handling missing data in IoT systems.</description>
	<pubDate>2026-01-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 11: Data Fusion Method for Multi-Sensor Internet of Things Systems Including Data Imputation</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/11">doi: 10.3390/iot7010011</a></p>
	<p>Authors:
		Saugat Sharma
		Grzegorz Chmaj
		Henry Selvaraj
		</p>
	<p>In Internet of Things (IoT) systems, data collected by geographically distributed sensors is often incomplete due to device failures, harsh deployment conditions, energy constraints, and unreliable communication. Such data gaps can significantly degrade downstream data processing and decision-making, particularly when failures result in the loss of all locally redundant sensors. Conventional imputation approaches typically rely on historical trends or multi-sensor fusion within the same target environment; however, historical methods struggle to capture emerging patterns, while same-location fusion remains vulnerable to single-point failures when local redundancy is unavailable. This article proposes a correlation-aware, cross-location data fusion framework for data imputation in IoT networks that explicitly addresses single-point failure scenarios. Instead of relying on co-located sensors, the framework selectively fuses semantically similar features from independent and geographically distributed gateways using summary statistics-based and correlation screening to minimize communication overhead. The resulting fused dataset is then processed using a lightweight KNN with an Iterative PCA imputation method, which combines local neighborhood similarity with global covariance structure to generate synthetic data for missing values. The proposed framework is evaluated using real-world weather station data collected from eight geographically diverse locations across the United States. The experimental results show that the proposed approach achieves improved or comparable imputation accuracy relative to conventional same-location fusion methods when sufficient cross-location feature correlation exists and degrades gracefully when correlation is weak. By enabling data recovery without requiring redundant local sensors, the proposed approach provides a resource-efficient and failure-resilient solution for handling missing data in IoT systems.</p>
	]]></content:encoded>

	<dc:title>Data Fusion Method for Multi-Sensor Internet of Things Systems Including Data Imputation</dc:title>
			<dc:creator>Saugat Sharma</dc:creator>
			<dc:creator>Grzegorz Chmaj</dc:creator>
			<dc:creator>Henry Selvaraj</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010011</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-26</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-26</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/iot7010011</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/10">

	<title>IoT, Vol. 7, Pages 10: Monitoring and Control System Based on Mixed Reality and the S7.Net Library</title>
	<link>https://www.mdpi.com/2624-831X/7/1/10</link>
	<description>The predominant approach in the realm of industrial process monitoring and control involves the utilization of HMI (Human–Machine Interface) interfaces and conventional SCADA (Supervisory Control and Data Acquisition) systems. This limitation restricts user mobility, interaction with industrial equipment, and process status assessment. In the context of Industry 4.0, the ability to monitor and control industrial processes in real time is paramount. The present paper designs and implements a system for monitoring and controlling an industrial assembly line based on mixed reality. The technology employed to facilitate communication between the system and the industrial line is S7.Net. These elements facilitate direct communication with the industrial process equipment. The system facilitates the visualization of operating parameters and the status of the equipment utilized in the industrial process and its control. All data is superimposed on the physical environment through virtual operational panels. The system functions independently, negating the necessity for intermediate servers or other complex structures. The system’s operation is predicated on a series of algorithms. These instruments facilitate the automated analysis of industrial process parameters. These devices are utilized to ascertain the operational dynamics of the industrial line. The experimental results were obtained using a real industrial line. These models are employed to demonstrate the performance of data transmission, the identification of the system’s operating states, and the system’s ability to shut down in the event of operating errors. The proposed system is designed to function in a variety of industrial environments within the paradigm of Industry 4.0, facilitating the utilization of multiple virtual interfaces that enable user interaction with various elements through which the assembly process is monitored and controlled.</description>
	<pubDate>2026-01-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 10: Monitoring and Control System Based on Mixed Reality and the S7.Net Library</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/10">doi: 10.3390/iot7010010</a></p>
	<p>Authors:
		Tudor Covrig
		Adrian Duka
		Liviu Miclea
		</p>
	<p>The predominant approach in the realm of industrial process monitoring and control involves the utilization of HMI (Human–Machine Interface) interfaces and conventional SCADA (Supervisory Control and Data Acquisition) systems. This limitation restricts user mobility, interaction with industrial equipment, and process status assessment. In the context of Industry 4.0, the ability to monitor and control industrial processes in real time is paramount. The present paper designs and implements a system for monitoring and controlling an industrial assembly line based on mixed reality. The technology employed to facilitate communication between the system and the industrial line is S7.Net. These elements facilitate direct communication with the industrial process equipment. The system facilitates the visualization of operating parameters and the status of the equipment utilized in the industrial process and its control. All data is superimposed on the physical environment through virtual operational panels. The system functions independently, negating the necessity for intermediate servers or other complex structures. The system’s operation is predicated on a series of algorithms. These instruments facilitate the automated analysis of industrial process parameters. These devices are utilized to ascertain the operational dynamics of the industrial line. The experimental results were obtained using a real industrial line. These models are employed to demonstrate the performance of data transmission, the identification of the system’s operating states, and the system’s ability to shut down in the event of operating errors. The proposed system is designed to function in a variety of industrial environments within the paradigm of Industry 4.0, facilitating the utilization of multiple virtual interfaces that enable user interaction with various elements through which the assembly process is monitored and controlled.</p>
	]]></content:encoded>

	<dc:title>Monitoring and Control System Based on Mixed Reality and the S7.Net Library</dc:title>
			<dc:creator>Tudor Covrig</dc:creator>
			<dc:creator>Adrian Duka</dc:creator>
			<dc:creator>Liviu Miclea</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010010</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-23</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-23</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/iot7010010</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/9">

	<title>IoT, Vol. 7, Pages 9: Performance Evaluation of LoRaWAN for Monitoring People with Disabilities at University Campus</title>
	<link>https://www.mdpi.com/2624-831X/7/1/9</link>
	<description>The growing need to foster inclusive education in university environments has driven the development of technological solutions aimed at improving the academic experiences of students with disabilities. These individuals often face barriers to autonomy and participation, especially on large and complex campuses. This article presents the performance evaluation of a LoRaWAN network specifically designed for monitoring people with disabilities on a university campus. The system aims to provide equitable access to campus resources and real-time support to students with disabilities. Leveraging the advantages of Low-Power Wide-Area Networks (LPWAN), particularly LoRaWAN, the proposed system enables real-time tracking with broad coverage and minimal power consumption, without requiring any active user interaction. Each student receives a wearable LoRa-enabled device that wirelessly communicates with a network of gateways strategically installed throughout the campus. To evaluate the system’s performance, this work conducts link-level experiments focusing on the communication between the LoRa end devices (nodes) and the central gateway. The analysis focuses on the network coverage, signal strength (RSSI), signal-to-noise ratio (SNR), and packet reception rate (PRR). The experimental results confirmed that the proposed system is technically robust and operationally effective under real campus conditions. Beyond its technical contributions, the proposed solution represents a concrete step toward building safer and more accessible academic environments that reinforce the autonomy and inclusion of students with disabilities.</description>
	<pubDate>2026-01-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 9: Performance Evaluation of LoRaWAN for Monitoring People with Disabilities at University Campus</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/9">doi: 10.3390/iot7010009</a></p>
	<p>Authors:
		Jorge Rendulich
		Rony Almiron
		Xiomara Vilca
		Miguel Zea
		</p>
	<p>The growing need to foster inclusive education in university environments has driven the development of technological solutions aimed at improving the academic experiences of students with disabilities. These individuals often face barriers to autonomy and participation, especially on large and complex campuses. This article presents the performance evaluation of a LoRaWAN network specifically designed for monitoring people with disabilities on a university campus. The system aims to provide equitable access to campus resources and real-time support to students with disabilities. Leveraging the advantages of Low-Power Wide-Area Networks (LPWAN), particularly LoRaWAN, the proposed system enables real-time tracking with broad coverage and minimal power consumption, without requiring any active user interaction. Each student receives a wearable LoRa-enabled device that wirelessly communicates with a network of gateways strategically installed throughout the campus. To evaluate the system’s performance, this work conducts link-level experiments focusing on the communication between the LoRa end devices (nodes) and the central gateway. The analysis focuses on the network coverage, signal strength (RSSI), signal-to-noise ratio (SNR), and packet reception rate (PRR). The experimental results confirmed that the proposed system is technically robust and operationally effective under real campus conditions. Beyond its technical contributions, the proposed solution represents a concrete step toward building safer and more accessible academic environments that reinforce the autonomy and inclusion of students with disabilities.</p>
	]]></content:encoded>

	<dc:title>Performance Evaluation of LoRaWAN for Monitoring People with Disabilities at University Campus</dc:title>
			<dc:creator>Jorge Rendulich</dc:creator>
			<dc:creator>Rony Almiron</dc:creator>
			<dc:creator>Xiomara Vilca</dc:creator>
			<dc:creator>Miguel Zea</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010009</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-23</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-23</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/iot7010009</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/8">

	<title>IoT, Vol. 7, Pages 8: Impact of Router Count on Network Performance in OpenThread</title>
	<link>https://www.mdpi.com/2624-831X/7/1/8</link>
	<description>A low-power IPv6 mesh standard, Thread, is gaining traction in smart-home, building-automation, and industrial IoT deployments. It extends mesh connectivity with the help of Router-Eligible End Devices (REEDs), which can be promoted to, or demoted from, the router status. Promotion and demotion hinge on two tunable parameters, the router upgrade and the router downgrade thresholds. Yet the OpenThread reference stack ships with fixed values (16/23) for these thresholds. This paper presents a systematic study of how these thresholds shape router-election dynamics across diverse traffic loads and network topologies. Leveraging an extended OpenThread Network Simulator, a sweep through both router upgrade and router downgrade thresholds with different gaps was performed. Results reveal that the default settings may over-provision routing capacity and may result in increased frame retransmissions, wasting airtime and reducing energy efficiency.</description>
	<pubDate>2026-01-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 8: Impact of Router Count on Network Performance in OpenThread</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/8">doi: 10.3390/iot7010008</a></p>
	<p>Authors:
		Xaver Zak
		Peter Brida
		Juraj Machaj
		</p>
	<p>A low-power IPv6 mesh standard, Thread, is gaining traction in smart-home, building-automation, and industrial IoT deployments. It extends mesh connectivity with the help of Router-Eligible End Devices (REEDs), which can be promoted to, or demoted from, the router status. Promotion and demotion hinge on two tunable parameters, the router upgrade and the router downgrade thresholds. Yet the OpenThread reference stack ships with fixed values (16/23) for these thresholds. This paper presents a systematic study of how these thresholds shape router-election dynamics across diverse traffic loads and network topologies. Leveraging an extended OpenThread Network Simulator, a sweep through both router upgrade and router downgrade thresholds with different gaps was performed. Results reveal that the default settings may over-provision routing capacity and may result in increased frame retransmissions, wasting airtime and reducing energy efficiency.</p>
	]]></content:encoded>

	<dc:title>Impact of Router Count on Network Performance in OpenThread</dc:title>
			<dc:creator>Xaver Zak</dc:creator>
			<dc:creator>Peter Brida</dc:creator>
			<dc:creator>Juraj Machaj</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010008</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-19</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-19</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/iot7010008</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/7">

	<title>IoT, Vol. 7, Pages 7: Experimental Evaluation of NB-IoT Power Consumption and Energy Source Feasibility for Long-Term IoT Deployments</title>
	<link>https://www.mdpi.com/2624-831X/7/1/7</link>
	<description>Narrowband Internet of Things (NB-IoT) is widely used for connecting low-power devices that must operate for years without maintenance. To design reliable systems, it is essential to understand how much energy these devices consume under different conditions and which power sources can support long lifetimes. This study presents a detailed experimental evaluation of NB-IoT power consumption using a commercial System-on-Module (LMT-SoM). We measured various transmissions across different payload sizes, signal strengths, and temperatures. The results show that sending larger packets is far more efficient: a 1280-byte message requires about 7 times less energy per bit than an 80-byte message. However, standby currents varied widely between devices, from 6.7 µA to 23 µA, which has a major impact on battery life. Alongside these experiments, we compared different power sources for a 5-year deployment. Alkaline and lithium-thionyl chloride batteries were the most cost-effective solutions for indoor use, while solar panels combined with supercapacitors provided a sustainable option for outdoor applications. These findings offer practical guidance for engineers and researchers to design NB-IoT devices that balance energy efficiency, cost, and sustainability.</description>
	<pubDate>2026-01-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 7: Experimental Evaluation of NB-IoT Power Consumption and Energy Source Feasibility for Long-Term IoT Deployments</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/7">doi: 10.3390/iot7010007</a></p>
	<p>Authors:
		Valters Skrastins
		Vladislavs Medvedevs
		Dmitrijs Orlovs
		Juris Ormanis
		Janis Judvaitis
		</p>
	<p>Narrowband Internet of Things (NB-IoT) is widely used for connecting low-power devices that must operate for years without maintenance. To design reliable systems, it is essential to understand how much energy these devices consume under different conditions and which power sources can support long lifetimes. This study presents a detailed experimental evaluation of NB-IoT power consumption using a commercial System-on-Module (LMT-SoM). We measured various transmissions across different payload sizes, signal strengths, and temperatures. The results show that sending larger packets is far more efficient: a 1280-byte message requires about 7 times less energy per bit than an 80-byte message. However, standby currents varied widely between devices, from 6.7 µA to 23 µA, which has a major impact on battery life. Alongside these experiments, we compared different power sources for a 5-year deployment. Alkaline and lithium-thionyl chloride batteries were the most cost-effective solutions for indoor use, while solar panels combined with supercapacitors provided a sustainable option for outdoor applications. These findings offer practical guidance for engineers and researchers to design NB-IoT devices that balance energy efficiency, cost, and sustainability.</p>
	]]></content:encoded>

	<dc:title>Experimental Evaluation of NB-IoT Power Consumption and Energy Source Feasibility for Long-Term IoT Deployments</dc:title>
			<dc:creator>Valters Skrastins</dc:creator>
			<dc:creator>Vladislavs Medvedevs</dc:creator>
			<dc:creator>Dmitrijs Orlovs</dc:creator>
			<dc:creator>Juris Ormanis</dc:creator>
			<dc:creator>Janis Judvaitis</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010007</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2026-01-13</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2026-01-13</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/iot7010007</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/6">

	<title>IoT, Vol. 7, Pages 6: A Systematic Review of Self-Adaptive Mobile Applications with Cooperative Dimension</title>
	<link>https://www.mdpi.com/2624-831X/7/1/6</link>
	<description>The proliferation of mobile devices has driven significant growth in adaptive mobile applications (AMAs) that dynamically adjust their behavior based on contextual changes. While existing research has extensively studied individual adaptive systems, limited attention has been given to cooperative adaptation—where multiple AMAs coordinate their adaptive behaviors within shared mobile ecosystems. This systematic literature review addresses this research gap by analyzing 95 peer-reviewed studies published between 2010 and 2025 to characterize the current state of cooperative adaptation in mobile applications. Following established systematic review protocols, we searched six academic databases and applied rigorous inclusion/exclusion criteria to identify relevant studies. Our analysis reveals eight critical dimensions of cooperative adaptation: Monitor–Analyze–Plan–Execute–Knowledge (MAPE-K) structure, application domain, adaptation goals, context management, adaptation triggers, aspect considerations, coordination mechanisms, and cooperation levels. The findings indicate that 63.2% of studies demonstrate some form of cooperative behavior, ranging from basic context sharing to sophisticated conflict resolution mechanisms. However, only 7.4% of studies explicitly address high-level cooperative adaptation involving global goal optimization or comprehensive conflict resolution. Energy efficiency (21.1%) and usability (33.7%) emerge as the most frequently addressed adaptation goals, with Android platforms dominating the research landscape (36.8%). The review identifies significant gaps in comprehensive lifecycle support, standardized evaluation methodologies, and theoretical frameworks for multi-application cooperation. These findings establish a foundation for advancing research in cooperative adaptive mobile systems and provide a classification framework to guide future investigations in this emerging domain.</description>
	<pubDate>2025-12-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 6: A Systematic Review of Self-Adaptive Mobile Applications with Cooperative Dimension</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/6">doi: 10.3390/iot7010006</a></p>
	<p>Authors:
		Berhanyikun Amanuel Gebreselassie
		Nuno M. Garcia
		Dida Midekso
		</p>
	<p>The proliferation of mobile devices has driven significant growth in adaptive mobile applications (AMAs) that dynamically adjust their behavior based on contextual changes. While existing research has extensively studied individual adaptive systems, limited attention has been given to cooperative adaptation—where multiple AMAs coordinate their adaptive behaviors within shared mobile ecosystems. This systematic literature review addresses this research gap by analyzing 95 peer-reviewed studies published between 2010 and 2025 to characterize the current state of cooperative adaptation in mobile applications. Following established systematic review protocols, we searched six academic databases and applied rigorous inclusion/exclusion criteria to identify relevant studies. Our analysis reveals eight critical dimensions of cooperative adaptation: Monitor–Analyze–Plan–Execute–Knowledge (MAPE-K) structure, application domain, adaptation goals, context management, adaptation triggers, aspect considerations, coordination mechanisms, and cooperation levels. The findings indicate that 63.2% of studies demonstrate some form of cooperative behavior, ranging from basic context sharing to sophisticated conflict resolution mechanisms. However, only 7.4% of studies explicitly address high-level cooperative adaptation involving global goal optimization or comprehensive conflict resolution. Energy efficiency (21.1%) and usability (33.7%) emerge as the most frequently addressed adaptation goals, with Android platforms dominating the research landscape (36.8%). The review identifies significant gaps in comprehensive lifecycle support, standardized evaluation methodologies, and theoretical frameworks for multi-application cooperation. These findings establish a foundation for advancing research in cooperative adaptive mobile systems and provide a classification framework to guide future investigations in this emerging domain.</p>
	]]></content:encoded>

	<dc:title>A Systematic Review of Self-Adaptive Mobile Applications with Cooperative Dimension</dc:title>
			<dc:creator>Berhanyikun Amanuel Gebreselassie</dc:creator>
			<dc:creator>Nuno M. Garcia</dc:creator>
			<dc:creator>Dida Midekso</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010006</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-31</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/iot7010006</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/5">

	<title>IoT, Vol. 7, Pages 5: Intelligent Water Management Through Edge-Enabled IoT, AI, and Big Data Technologies</title>
	<link>https://www.mdpi.com/2624-831X/7/1/5</link>
	<description>In the 21st century, urbanization, population growth, and climate change have created significant problems in water resource management. Recent advancements in technologies such as Internet of Things (IoT), Edge Computing (EC), Artificial Intelligence (AI), and Big Data Analytics (BDA) are changing the operations of the water resource management systems. In this study, we present a systematic review, highlighting the contributions of these technologies in water management systems. More specifically, we highlight the IoT and EC water monitoring systems that enable real-time sensing of water quality and consumption. In addition, AI methods for anomaly detection and predictive maintenance are reviewed, focusing on water demand forecasting. BDA methods are also discussed, highlighting their ability to integrate data from different data sources, such as sensors and historical data. Additionally, a discussion is provided of how water management systems could enhance sustainability, resilience, and efficiency by combining big data, IoT, EC, and AI. Lastly, future directions are outlined regarding how state-of-the-art technologies may further support efficient water resources management.</description>
	<pubDate>2025-12-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 5: Intelligent Water Management Through Edge-Enabled IoT, AI, and Big Data Technologies</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/5">doi: 10.3390/iot7010005</a></p>
	<p>Authors:
		Petros Amanatidis
		Eleftherios Lyratzis
		Vasileios Angelopoulos
		Eleftherios Kouloumpris
		Efstratios Skaperdas
		Nick Bassiliades
		Ioannis Vlahavas
		Fotios Maris
		Dimitrios Emmanouloudis
		Dimitris Karampatzakis
		</p>
	<p>In the 21st century, urbanization, population growth, and climate change have created significant problems in water resource management. Recent advancements in technologies such as Internet of Things (IoT), Edge Computing (EC), Artificial Intelligence (AI), and Big Data Analytics (BDA) are changing the operations of the water resource management systems. In this study, we present a systematic review, highlighting the contributions of these technologies in water management systems. More specifically, we highlight the IoT and EC water monitoring systems that enable real-time sensing of water quality and consumption. In addition, AI methods for anomaly detection and predictive maintenance are reviewed, focusing on water demand forecasting. BDA methods are also discussed, highlighting their ability to integrate data from different data sources, such as sensors and historical data. Additionally, a discussion is provided of how water management systems could enhance sustainability, resilience, and efficiency by combining big data, IoT, EC, and AI. Lastly, future directions are outlined regarding how state-of-the-art technologies may further support efficient water resources management.</p>
	]]></content:encoded>

	<dc:title>Intelligent Water Management Through Edge-Enabled IoT, AI, and Big Data Technologies</dc:title>
			<dc:creator>Petros Amanatidis</dc:creator>
			<dc:creator>Eleftherios Lyratzis</dc:creator>
			<dc:creator>Vasileios Angelopoulos</dc:creator>
			<dc:creator>Eleftherios Kouloumpris</dc:creator>
			<dc:creator>Efstratios Skaperdas</dc:creator>
			<dc:creator>Nick Bassiliades</dc:creator>
			<dc:creator>Ioannis Vlahavas</dc:creator>
			<dc:creator>Fotios Maris</dc:creator>
			<dc:creator>Dimitrios Emmanouloudis</dc:creator>
			<dc:creator>Dimitris Karampatzakis</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010005</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-31</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/iot7010005</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/4">

	<title>IoT, Vol. 7, Pages 4: Trust-Aware Distributed and Hybrid Intrusion Detection for Rank Attacks in RPL IoT Environments</title>
	<link>https://www.mdpi.com/2624-831X/7/1/4</link>
	<description>The rapid expansion of Internet of Things (IoT) systems in critical infrastructures has raised significant concerns regarding network security and reliability. In particular, RPL (Routing Protocol for Low-Power and Lossy Networks), widely adopted in IoT communications, remains vulnerable to topological manipulation attacks such as Decreased Rank, Increased Rank, and the less-explored Worst Parent Selection (WPS). While several RPL security approaches address rank manipulation attacks, most assume static topologies and offer limited support for mobility. Moreover, trust-based routing and hybrid IDS (Intrusion Detection System) approaches are seldom integrated, which limits detection reliability under mobility. This study introduces a unified IDS framework that combines mobility awareness with trust-based decision-making to detect multiple rank-based attacks. We evaluate two lightweight, rule-based IDS architectures: a fully distributed model and a hybrid model supported by designated monitoring nodes. A trust-based mechanism is incorporated into both architectures, and their performance is assessed under static and mobile scenarios. Results show that while the distributed IDS provides rapid local responsiveness, the hybrid IDS maintains more stable latency and packet delivery under mobility. Additionally, incorporating trust metrics reduces false alerts and improves detection reliability while preserving low latency and energy usage, supporting time-sensitive applications such as healthcare monitoring.</description>
	<pubDate>2025-12-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 4: Trust-Aware Distributed and Hybrid Intrusion Detection for Rank Attacks in RPL IoT Environments</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/4">doi: 10.3390/iot7010004</a></p>
	<p>Authors:
		Bruno Monteiro
		Jorge Granjal
		</p>
	<p>The rapid expansion of Internet of Things (IoT) systems in critical infrastructures has raised significant concerns regarding network security and reliability. In particular, RPL (Routing Protocol for Low-Power and Lossy Networks), widely adopted in IoT communications, remains vulnerable to topological manipulation attacks such as Decreased Rank, Increased Rank, and the less-explored Worst Parent Selection (WPS). While several RPL security approaches address rank manipulation attacks, most assume static topologies and offer limited support for mobility. Moreover, trust-based routing and hybrid IDS (Intrusion Detection System) approaches are seldom integrated, which limits detection reliability under mobility. This study introduces a unified IDS framework that combines mobility awareness with trust-based decision-making to detect multiple rank-based attacks. We evaluate two lightweight, rule-based IDS architectures: a fully distributed model and a hybrid model supported by designated monitoring nodes. A trust-based mechanism is incorporated into both architectures, and their performance is assessed under static and mobile scenarios. Results show that while the distributed IDS provides rapid local responsiveness, the hybrid IDS maintains more stable latency and packet delivery under mobility. Additionally, incorporating trust metrics reduces false alerts and improves detection reliability while preserving low latency and energy usage, supporting time-sensitive applications such as healthcare monitoring.</p>
	]]></content:encoded>

	<dc:title>Trust-Aware Distributed and Hybrid Intrusion Detection for Rank Attacks in RPL IoT Environments</dc:title>
			<dc:creator>Bruno Monteiro</dc:creator>
			<dc:creator>Jorge Granjal</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010004</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-30</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-30</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/iot7010004</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/3">

	<title>IoT, Vol. 7, Pages 3: FG-RCA: Kernel-Anchored Post-Exploitation Containment for IoT with Policy Synthesis and Mitigation of Zero-Day Attacks</title>
	<link>https://www.mdpi.com/2624-831X/7/1/3</link>
	<description>Zero-day intrusions on IoT endpoints demand defenses that curtail attacker impact and persistence after breach. This article presents Fine-Grained Runtime Containment Agent (FG-RCA), a lightweight post-exploitation containment system that learns least-privilege behavior from execution and enforces it in the kernel via eBPF with Linux Security Modules (LSM). In a learn phase, LSM/eBPF probes stream security-relevant events to a Rust agent that synthesizes policies per device role. In an enforce phase, policies are compiled into eBPF maps and evaluated at an extended hook set spanning process execution (bprm_check_security), file access (file_open), network egress and exfiltration (socket_connect, socket_sendmsg), privilege use (capable), process injection (ptrace_access_check), tamper/anti-forensics (inode_unlink). Policies bind to kernel-truth identities—inode, device, mount intrusion detection system (IDS), executable SHA-256, and cgroup/namespace identifiers—rather than paths, mitigating time-of-check to time-of-use (TOCTOU) and aliasing. Operational safeguards include Ed25519-signed policies, atomic rollback, and shadow mode logging events to enable policy evolution. Evaluation on embedded Linux demonstrates containment with low overhead.</description>
	<pubDate>2025-12-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 3: FG-RCA: Kernel-Anchored Post-Exploitation Containment for IoT with Policy Synthesis and Mitigation of Zero-Day Attacks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/3">doi: 10.3390/iot7010003</a></p>
	<p>Authors:
		Fouad Ailabouni
		Jesús-Ángel Román-Gallego
		María-Luisa Pérez-Delgado
		</p>
	<p>Zero-day intrusions on IoT endpoints demand defenses that curtail attacker impact and persistence after breach. This article presents Fine-Grained Runtime Containment Agent (FG-RCA), a lightweight post-exploitation containment system that learns least-privilege behavior from execution and enforces it in the kernel via eBPF with Linux Security Modules (LSM). In a learn phase, LSM/eBPF probes stream security-relevant events to a Rust agent that synthesizes policies per device role. In an enforce phase, policies are compiled into eBPF maps and evaluated at an extended hook set spanning process execution (bprm_check_security), file access (file_open), network egress and exfiltration (socket_connect, socket_sendmsg), privilege use (capable), process injection (ptrace_access_check), tamper/anti-forensics (inode_unlink). Policies bind to kernel-truth identities—inode, device, mount intrusion detection system (IDS), executable SHA-256, and cgroup/namespace identifiers—rather than paths, mitigating time-of-check to time-of-use (TOCTOU) and aliasing. Operational safeguards include Ed25519-signed policies, atomic rollback, and shadow mode logging events to enable policy evolution. Evaluation on embedded Linux demonstrates containment with low overhead.</p>
	]]></content:encoded>

	<dc:title>FG-RCA: Kernel-Anchored Post-Exploitation Containment for IoT with Policy Synthesis and Mitigation of Zero-Day Attacks</dc:title>
			<dc:creator>Fouad Ailabouni</dc:creator>
			<dc:creator>Jesús-Ángel Román-Gallego</dc:creator>
			<dc:creator>María-Luisa Pérez-Delgado</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010003</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-25</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-25</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/iot7010003</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/2">

	<title>IoT, Vol. 7, Pages 2: Orthogonal Space-Time Bluetooth System for IoT Communications</title>
	<link>https://www.mdpi.com/2624-831X/7/1/2</link>
	<description>There is increasing interest in improving the reliability of short-range wireless links in dense IoT deployments, where BLE is widely used due to its low power consumption and robust GFSK modulation. For this purpose, this work presents a novel Orthogonal Space-Time (OST) scheme for transmission and detection of BLE signals while preserving the BLE GFSK waveform and modulation constraints. The proposed signal processing system integrates advanced OST coding techniques with nonlinear GFSK modulation to achieve high-quality communication while maintaining phase continuity. This implies that the standard BLE GFSK modulator and demodulator blocks can be reused, with additional processing introduced only in the multi-antenna encoder and combiner. A detailed theoretical analysis demonstrates the feasibility of employing the Rayleigh fading channel model in BLE communications and establishes the BER performance bounds for various MIMO configurations. Simulations confirm the advantages of the proposed OST-GFSK signal processing scheme, maintaining a consistent performance when compared with OST linear modulation approaches under Rayleigh fading channels. As a result, the proposed IoT-enabling technology integrates the advantages of widely used OST linear modulation with nonlinear GFSK modulation required for BLE.</description>
	<pubDate>2025-12-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 2: Orthogonal Space-Time Bluetooth System for IoT Communications</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/2">doi: 10.3390/iot7010002</a></p>
	<p>Authors:
		Rodrigo Aldana-López
		Omar Longoria-Gandara
		Jose Valencia-Velasco
		Javier Vázquez-Castillo
		Luis Pizano-Escalante
		</p>
	<p>There is increasing interest in improving the reliability of short-range wireless links in dense IoT deployments, where BLE is widely used due to its low power consumption and robust GFSK modulation. For this purpose, this work presents a novel Orthogonal Space-Time (OST) scheme for transmission and detection of BLE signals while preserving the BLE GFSK waveform and modulation constraints. The proposed signal processing system integrates advanced OST coding techniques with nonlinear GFSK modulation to achieve high-quality communication while maintaining phase continuity. This implies that the standard BLE GFSK modulator and demodulator blocks can be reused, with additional processing introduced only in the multi-antenna encoder and combiner. A detailed theoretical analysis demonstrates the feasibility of employing the Rayleigh fading channel model in BLE communications and establishes the BER performance bounds for various MIMO configurations. Simulations confirm the advantages of the proposed OST-GFSK signal processing scheme, maintaining a consistent performance when compared with OST linear modulation approaches under Rayleigh fading channels. As a result, the proposed IoT-enabling technology integrates the advantages of widely used OST linear modulation with nonlinear GFSK modulation required for BLE.</p>
	]]></content:encoded>

	<dc:title>Orthogonal Space-Time Bluetooth System for IoT Communications</dc:title>
			<dc:creator>Rodrigo Aldana-López</dc:creator>
			<dc:creator>Omar Longoria-Gandara</dc:creator>
			<dc:creator>Jose Valencia-Velasco</dc:creator>
			<dc:creator>Javier Vázquez-Castillo</dc:creator>
			<dc:creator>Luis Pizano-Escalante</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010002</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-22</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/iot7010002</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/7/1/1">

	<title>IoT, Vol. 7, Pages 1: Blockchain-Based Certification in Fisheries: A Survey of Technologies and Methodologies</title>
	<link>https://www.mdpi.com/2624-831X/7/1/1</link>
	<description>The integrity of certification processes in the agrifood and fishing industries is essential for combating fraud, ensuring food safety, and meeting rising consumer expectations for transparency and sustainability. Yet, current certification systems remain fragmented, and they are vulnerable to tampering and highly dependent on manual or centralized procedures. This study addresses these gaps by providing a comprehensive survey that systematically classifies blockchain-based certification technologies and methodologies applied to the fisheries sector. The survey examines how the blockchain enhances trust through immutable record-keeping, smart contracts, and decentralized verification mechanisms, ensuring authenticity and accountability across the supply chain. Special attention is given to case studies and implementations that focus on ensuring food safety, verifying sustainability claims, and fostering consumer trust through transparent labeling. Furthermore, the paper identifies technological barriers, such as scalability and interoperability, and puts forward a collection of functional and non-functional requirements for holistic blockchain implementation. By providing a detailed overview of current trends and gaps, this study aims to guide researchers, industry stakeholders, and policymakers in adopting and optimizing blockchain technologies for certification. The findings highlight the potential of blockchain to innovate certification systems, easing the way for more resilient, sustainable, and consumer-centric agrifood and fishing industries.</description>
	<pubDate>2025-12-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 7, Pages 1: Blockchain-Based Certification in Fisheries: A Survey of Technologies and Methodologies</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/7/1/1">doi: 10.3390/iot7010001</a></p>
	<p>Authors:
		Isaac Olayemi Olaleye
		Oluwafemi Olowojuni
		Asoro Ojevwe Blessing
		Jesús Rodríguez-Molina
		</p>
	<p>The integrity of certification processes in the agrifood and fishing industries is essential for combating fraud, ensuring food safety, and meeting rising consumer expectations for transparency and sustainability. Yet, current certification systems remain fragmented, and they are vulnerable to tampering and highly dependent on manual or centralized procedures. This study addresses these gaps by providing a comprehensive survey that systematically classifies blockchain-based certification technologies and methodologies applied to the fisheries sector. The survey examines how the blockchain enhances trust through immutable record-keeping, smart contracts, and decentralized verification mechanisms, ensuring authenticity and accountability across the supply chain. Special attention is given to case studies and implementations that focus on ensuring food safety, verifying sustainability claims, and fostering consumer trust through transparent labeling. Furthermore, the paper identifies technological barriers, such as scalability and interoperability, and puts forward a collection of functional and non-functional requirements for holistic blockchain implementation. By providing a detailed overview of current trends and gaps, this study aims to guide researchers, industry stakeholders, and policymakers in adopting and optimizing blockchain technologies for certification. The findings highlight the potential of blockchain to innovate certification systems, easing the way for more resilient, sustainable, and consumer-centric agrifood and fishing industries.</p>
	]]></content:encoded>

	<dc:title>Blockchain-Based Certification in Fisheries: A Survey of Technologies and Methodologies</dc:title>
			<dc:creator>Isaac Olayemi Olaleye</dc:creator>
			<dc:creator>Oluwafemi Olowojuni</dc:creator>
			<dc:creator>Asoro Ojevwe Blessing</dc:creator>
			<dc:creator>Jesús Rodríguez-Molina</dc:creator>
		<dc:identifier>doi: 10.3390/iot7010001</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-22</prism:publicationDate>
	<prism:volume>7</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/iot7010001</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/7/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/78">

	<title>IoT, Vol. 6, Pages 78: Privacy-Preserving Federated Learning for Distributed Financial IoT: A Blockchain-Based Framework for Secure Cryptocurrency Market Analytics</title>
	<link>https://www.mdpi.com/2624-831X/6/4/78</link>
	<description>The proliferation of Internet of Things (IoT) devices in financial markets has created distributed ecosystems where cryptocurrency exchanges, trading platforms, and market data providers operate as autonomous edge nodes generating massive volumes of sensitive financial data. Collaborative machine learning across these distributed financial IoT nodes faces fundamental challenges: institutions possess valuable proprietary data but cannot share it directly due to competitive concerns, regulatory constraints, and trust management requirements in decentralized networks. This study presents a privacy-preserving federated learning framework tailored for distributed financial IoT systems, combining differential privacy with Shamir secret sharing to enable secure collaborative intelligence across blockchain-based cryptocurrency trading networks. We implement per-layer gradient clipping and Rényi differential privacy composition to minimize utility loss while maintaining formal privacy guarantees in edge computing scenarios. Using 5.6 million orderbook observations from 11 cryptocurrency pairs collected across distributed exchange nodes, we evaluate three data partitioning strategies simulating realistic heterogeneity patterns in financial IoT deployments. Our experiments reveal that federated edge learning imposes 9–15 percentage point accuracy degradation compared to centralized cloud processing, driven primarily by data distribution heterogeneity across autonomous nodes. Critically, adding differential privacy (ε = 3.0) and cryptographic secret sharing increases this degradation by less than 0.3 percentage points when mechanisms are calibrated appropriately for edge devices. The framework achieves 62–66.5% direction accuracy on cryptocurrency price movements, with confidence-based execution generating 71–137 basis points average profit per trade. 
These results demonstrate the practical viability of privacy-preserving collaborative intelligence for distributed financial IoT while identifying that the federated optimization gap dominates privacy mechanism costs. Our findings offer architectural insights for designing trustworthy distributed systems in blockchain-enabled financial IoT ecosystems.</description>
	<pubDate>2025-12-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 78: Privacy-Preserving Federated Learning for Distributed Financial IoT: A Blockchain-Based Framework for Secure Cryptocurrency Market Analytics</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/78">doi: 10.3390/iot6040078</a></p>
	<p>Authors:
		Oleksandr Kuznetsov
		Saltanat Adilzhanova
		Serhiy Florov
		Valerii Bushkov
		Danylo Peremetchyk
		</p>
	<p>The proliferation of Internet of Things (IoT) devices in financial markets has created distributed ecosystems where cryptocurrency exchanges, trading platforms, and market data providers operate as autonomous edge nodes generating massive volumes of sensitive financial data. Collaborative machine learning across these distributed financial IoT nodes faces fundamental challenges: institutions possess valuable proprietary data but cannot share it directly due to competitive concerns, regulatory constraints, and trust management requirements in decentralized networks. This study presents a privacy-preserving federated learning framework tailored for distributed financial IoT systems, combining differential privacy with Shamir secret sharing to enable secure collaborative intelligence across blockchain-based cryptocurrency trading networks. We implement per-layer gradient clipping and Rényi differential privacy composition to minimize utility loss while maintaining formal privacy guarantees in edge computing scenarios. Using 5.6 million orderbook observations from 11 cryptocurrency pairs collected across distributed exchange nodes, we evaluate three data partitioning strategies simulating realistic heterogeneity patterns in financial IoT deployments. Our experiments reveal that federated edge learning imposes 9–15 percentage point accuracy degradation compared to centralized cloud processing, driven primarily by data distribution heterogeneity across autonomous nodes. Critically, adding differential privacy (ε = 3.0) and cryptographic secret sharing increases this degradation by less than 0.3 percentage points when mechanisms are calibrated appropriately for edge devices. The framework achieves 62–66.5% direction accuracy on cryptocurrency price movements, with confidence-based execution generating 71–137 basis points average profit per trade. 
These results demonstrate the practical viability of privacy-preserving collaborative intelligence for distributed financial IoT while identifying that the federated optimization gap dominates privacy mechanism costs. Our findings offer architectural insights for designing trustworthy distributed systems in blockchain-enabled financial IoT ecosystems.</p>
	]]></content:encoded>

	<dc:title>Privacy-Preserving Federated Learning for Distributed Financial IoT: A Blockchain-Based Framework for Secure Cryptocurrency Market Analytics</dc:title>
			<dc:creator>Oleksandr Kuznetsov</dc:creator>
			<dc:creator>Saltanat Adilzhanova</dc:creator>
			<dc:creator>Serhiy Florov</dc:creator>
			<dc:creator>Valerii Bushkov</dc:creator>
			<dc:creator>Danylo Peremetchyk</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040078</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-11</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-11</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>78</prism:startingPage>
		<prism:doi>10.3390/iot6040078</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/78</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/77">

	<title>IoT, Vol. 6, Pages 77: LPWAN Technologies for IoT: Real-World Deployment Performance and Practical Comparison</title>
	<link>https://www.mdpi.com/2624-831X/6/4/77</link>
	<description>Low Power Wide Area Networks (LPWAN) have emerged as essential connectivity solutions for the Internet of Things (IoT), addressing requirements for long range, energy efficient communication that traditional wireless technologies cannot meet. With LPWAN connections projected to grow at 26% compound annual growth rate until 2027, understanding real-world performance is crucial for technology selection. This review examines four leading LPWAN technologies—LoRaWAN, Sigfox, Narrowband IoT (NB-IoT), and LTE-M. This review analyzes 20 peer reviewed studies from 2015–2025 reporting real-world deployment metrics across power consumption, range, data rate, scalability, availability, and security. Across these studies, practical performance diverges from vendor specifications. In the cited rural and urban LoRaWAN deployments LoRaWAN achieves 2+ year battery life and 11 km rural range but suffers collision limitations above 1000 devices per gateway. Sigfox demonstrates exceptional range (280 km record) with minimal power consumption but remains constrained by 12 byte payloads and security vulnerabilities. NB-IoT provides robust performance with 96–100% packet delivery ratios at −127 dBm on the tested commercial networks, and supports tens of thousands devices per cell, though mobility increases energy consumption. In the cited trials LTE-M offers highest throughput and sub 200 ms latency but fails beyond −113 dBm where NB-IoT maintains connectivity. NB-IoT emerges optimal for large scale stationary deployments, while LTE-M suits high throughput mobile applications.</description>
	<pubDate>2025-12-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 77: LPWAN Technologies for IoT: Real-World Deployment Performance and Practical Comparison</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/77">doi: 10.3390/iot6040077</a></p>
	<p>Authors:
		Dmitrijs Orlovs
		Artis Rusins
		Valters Skrastiņš
		Janis Judvaitis
		</p>
	<p>Low Power Wide Area Networks (LPWAN) have emerged as essential connectivity solutions for the Internet of Things (IoT), addressing requirements for long range, energy efficient communication that traditional wireless technologies cannot meet. With LPWAN connections projected to grow at 26% compound annual growth rate until 2027, understanding real-world performance is crucial for technology selection. This review examines four leading LPWAN technologies—LoRaWAN, Sigfox, Narrowband IoT (NB-IoT), and LTE-M. This review analyzes 20 peer reviewed studies from 2015–2025 reporting real-world deployment metrics across power consumption, range, data rate, scalability, availability, and security. Across these studies, practical performance diverges from vendor specifications. In the cited rural and urban LoRaWAN deployments LoRaWAN achieves 2+ year battery life and 11 km rural range but suffers collision limitations above 1000 devices per gateway. Sigfox demonstrates exceptional range (280 km record) with minimal power consumption but remains constrained by 12 byte payloads and security vulnerabilities. NB-IoT provides robust performance with 96–100% packet delivery ratios at −127 dBm on the tested commercial networks, and supports tens of thousands devices per cell, though mobility increases energy consumption. In the cited trials LTE-M offers highest throughput and sub 200 ms latency but fails beyond −113 dBm where NB-IoT maintains connectivity. NB-IoT emerges optimal for large scale stationary deployments, while LTE-M suits high throughput mobile applications.</p>
	]]></content:encoded>

	<dc:title>LPWAN Technologies for IoT: Real-World Deployment Performance and Practical Comparison</dc:title>
			<dc:creator>Dmitrijs Orlovs</dc:creator>
			<dc:creator>Artis Rusins</dc:creator>
			<dc:creator>Valters Skrastiņš</dc:creator>
			<dc:creator>Janis Judvaitis</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040077</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-10</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-10</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>77</prism:startingPage>
		<prism:doi>10.3390/iot6040077</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/77</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/76">

	<title>IoT, Vol. 6, Pages 76: Fog Computing and Graph-Based Databases for Remote Health Monitoring in IoMT Settings</title>
	<link>https://www.mdpi.com/2624-831X/6/4/76</link>
	<description>Remote patient monitoring is a promising and transformative pillar of healthcare. However, deploying such systems at a scale—across thousands of patients and Internet of Medical Things (IoMT) devices—demands robust, low-latency, and scalable storage systems. This research examines the application of Fog Computing for remote patient monitoring in IoMT settings, where a large volume of data, low latency, and secure management of confidential healthcare information are essential. We propose a four-layer IoMT–Fog–Cloud architecture in which Fog nodes, equipped with graph-based databases (Neo4j), conduct local processing, filtering, and integration of heterogeneous health data before transmitting it to cloud servers. To assess the viability of our approach, we implemented a containerised Fog node and simulated multiple patient-device networks using a real-world dataset. System performance was evaluated using 11 scenarios with varying numbers of devices and data transmission frequencies. Performance metrics include CPU load, memory footprint, and query latency. The results demonstrate that Neo4j can efficiently ingest and query millions of health observations with an acceptable latency of less than 500 ms, even in extreme scenarios involving more than 12,000 devices transmitting data every 50 ms. The resource consumption remained well below the critical thresholds, highlighting the suitability of the proposed approach for Fog nodes. Combining Fog computing and Neo4j is a novel approach that meets the latency and real-time data ingestion requirements of IoMT environments. Therefore, it is suitable for supporting delay-sensitive monitoring programmes, where rapid detection of anomalies is critical (e.g., a prompt response to cardiac emergencies or early detection of respiratory deterioration in patients with chronic obstructive pulmonary disease), even at a large scale.</description>
	<pubDate>2025-12-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 76: Fog Computing and Graph-Based Databases for Remote Health Monitoring in IoMT Settings</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/76">doi: 10.3390/iot6040076</a></p>
	<p>Authors:
		Karrar A. Yousif
		Jorge Calvillo-Arbizu
		Agustín W. Lara-Romero
		</p>
	<p>Remote patient monitoring is a promising and transformative pillar of healthcare. However, deploying such systems at a scale—across thousands of patients and Internet of Medical Things (IoMT) devices—demands robust, low-latency, and scalable storage systems. This research examines the application of Fog Computing for remote patient monitoring in IoMT settings, where a large volume of data, low latency, and secure management of confidential healthcare information are essential. We propose a four-layer IoMT–Fog–Cloud architecture in which Fog nodes, equipped with graph-based databases (Neo4j), conduct local processing, filtering, and integration of heterogeneous health data before transmitting it to cloud servers. To assess the viability of our approach, we implemented a containerised Fog node and simulated multiple patient-device networks using a real-world dataset. System performance was evaluated using 11 scenarios with varying numbers of devices and data transmission frequencies. Performance metrics include CPU load, memory footprint, and query latency. The results demonstrate that Neo4j can efficiently ingest and query millions of health observations with an acceptable latency of less than 500 ms, even in extreme scenarios involving more than 12,000 devices transmitting data every 50 ms. The resource consumption remained well below the critical thresholds, highlighting the suitability of the proposed approach for Fog nodes. Combining Fog computing and Neo4j is a novel approach that meets the latency and real-time data ingestion requirements of IoMT environments. Therefore, it is suitable for supporting delay-sensitive monitoring programmes, where rapid detection of anomalies is critical (e.g., a prompt response to cardiac emergencies or early detection of respiratory deterioration in patients with chronic obstructive pulmonary disease), even at a large scale.</p>
	]]></content:encoded>

	<dc:title>Fog Computing and Graph-Based Databases for Remote Health Monitoring in IoMT Settings</dc:title>
			<dc:creator>Karrar A. Yousif</dc:creator>
			<dc:creator>Jorge Calvillo-Arbizu</dc:creator>
			<dc:creator>Agustín W. Lara-Romero</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040076</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-03</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>76</prism:startingPage>
		<prism:doi>10.3390/iot6040076</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/76</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/75">

	<title>IoT, Vol. 6, Pages 75: IoT-Driven Destination Prediction in Smart Urban Mobility: A Comparative Study of Markov Chains and Hidden Markov Models</title>
	<link>https://www.mdpi.com/2624-831X/6/4/75</link>
	<description>The increasing availability of IoT-enabled mobility data and intelligent transportation systems in Smart Cities demands efficient and interpretable models for destination prediction. This study presents a comparative analysis between Markov Chains and Hidden Markov Models applied to urban mobility trajectories, evaluated through mean precision values. To ensure methodological rigor, the Smart Sampling with Data Filtering (SSDF) method was developed, integrating trajectory segmentation, spatial tessellation, frequency aggregation, and 10-fold cross-validation. Using data from 23 vehicles in the Vehicle Energy Dataset (VED) and a filtering threshold based on trajectory recurrence, the results show that the HMM achieved 61% precision versus 59% for Markov Chains (p = 0.0248). Incorporating day-of-week contextual information led to statistically significant precision improvements in 78.3% of cases for precision (95.7% for recall, 87.0% for F1-score). The remaining 21.7% indicate that model selection should balance model complexity and precision-efficiency trade-off. The proposed SSDF method establishes a replicable foundation for evaluating probabilistic models in IoT-based mobility systems, contributing to scalable, explainable, and sustainable Smart City transportation analytics.</description>
	<pubDate>2025-12-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 75: IoT-Driven Destination Prediction in Smart Urban Mobility: A Comparative Study of Markov Chains and Hidden Markov Models</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/75">doi: 10.3390/iot6040075</a></p>
	<p>Authors:
		João Batista Firmino Junior
		Francisco Dantas Nobre Neto
		Bruno Neiva Moreno
		Tiago Brasileiro Araújo
		</p>
	<p>The increasing availability of IoT-enabled mobility data and intelligent transportation systems in Smart Cities demands efficient and interpretable models for destination prediction. This study presents a comparative analysis between Markov Chains and Hidden Markov Models applied to urban mobility trajectories, evaluated through mean precision values. To ensure methodological rigor, the Smart Sampling with Data Filtering (SSDF) method was developed, integrating trajectory segmentation, spatial tessellation, frequency aggregation, and 10-fold cross-validation. Using data from 23 vehicles in the Vehicle Energy Dataset (VED) and a filtering threshold based on trajectory recurrence, the results show that the HMM achieved 61% precision versus 59% for Markov Chains (p = 0.0248). Incorporating day-of-week contextual information led to statistically significant precision improvements in 78.3% of cases for precision (95.7% for recall, 87.0% for F1-score). The remaining 21.7% indicate that model selection should balance model complexity and precision-efficiency trade-off. The proposed SSDF method establishes a replicable foundation for evaluating probabilistic models in IoT-based mobility systems, contributing to scalable, explainable, and sustainable Smart City transportation analytics.</p>
	]]></content:encoded>

	<dc:title>IoT-Driven Destination Prediction in Smart Urban Mobility: A Comparative Study of Markov Chains and Hidden Markov Models</dc:title>
			<dc:creator>João Batista Firmino Junior</dc:creator>
			<dc:creator>Francisco Dantas Nobre Neto</dc:creator>
			<dc:creator>Bruno Neiva Moreno</dc:creator>
			<dc:creator>Tiago Brasileiro Araújo</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040075</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-12-03</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-12-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>75</prism:startingPage>
		<prism:doi>10.3390/iot6040075</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/75</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/74">

	<title>IoT, Vol. 6, Pages 74: Optimizing IoMT Security: Performance Trade-Offs Between Neural Network Architectural Design, Dimensionality Reduction, and Class Imbalance Handling</title>
	<link>https://www.mdpi.com/2624-831X/6/4/74</link>
	<description>The proliferation of Internet of Medical Things (IoMT) devices in healthcare requires robust intrusion detection systems to protect sensitive data and ensure patient safety. While existing neural network-based Intrusion Detection Systems have shown considerable effectiveness, significant challenges persist&amp;mdash;particularly class imbalance and high data dimensionality. Although various approaches have been proposed to mitigate these issues, their actual impact on detection accuracy remains insufficiently explored. This study investigates advanced Artificial Neural Network (ANN) architectures and preprocessing strategies for intrusion detection in IoMT environments, addressing critical challenges of feature dimensionality and class imbalance. Leveraging the WUSTL-EHMS-2020 dataset&amp;mdash;a specialized dataset specifically designed for IoMT cybersecurity research&amp;mdash;this research systematically examines the performance of multiple neural network designs. Our research implements and evaluates five distinct ANN architectures: the Standard Feedforward Network, the Enhanced Channel ANN, Dual-Branch Addition and Concatenation ANNs, and the Shortcut Connection ANN. To mitigate the class imbalance challenge, we compare three balancing approaches: the Synthetic Minority Over-sampling Technique (SMOTE), Hybrid Over-Under Sampling, and the Weighted Cross-Entropy Loss Function. Performance analysis reveals nuanced insights across different architectures and balancing strategies. SMOTE-based models achieved average AUC scores ranging from 0.8491 to 0.8766. Hybrid sampling strategies improved performance, with AUC increasing to 0.8750. The weighted cross-entropy loss function demonstrated the most consistent performance. The most significant finding emerges from the Dual-Branch ANN with addition operations and a weighted loss function, which achieved 0.9403 Accuracy, 0.8786 AUC, a 0.8716 F1-Score, 0.8650 Precision, and 0.8786 Recall. 
Compared to the related work&amp;rsquo;s baseline, it demonstrates a substantial increase in F1 Score by 8.45% and an improvement of 18.67% in AUC and Recall, highlighting the model&amp;rsquo;s superiority at identifying potential security threats and minimizing false negatives.</description>
	<pubDate>2025-11-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 74: Optimizing IoMT Security: Performance Trade-Offs Between Neural Network Architectural Design, Dimensionality Reduction, and Class Imbalance Handling</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/74">doi: 10.3390/iot6040074</a></p>
	<p>Authors:
		Heyfa Ammar
		Asma Cherif
		</p>
	<p>The proliferation of Internet of Medical Things (IoMT) devices in healthcare requires robust intrusion detection systems to protect sensitive data and ensure patient safety. While existing neural network-based Intrusion Detection Systems have shown considerable effectiveness, significant challenges persist&mdash;particularly class imbalance and high data dimensionality. Although various approaches have been proposed to mitigate these issues, their actual impact on detection accuracy remains insufficiently explored. This study investigates advanced Artificial Neural Network (ANN) architectures and preprocessing strategies for intrusion detection in IoMT environments, addressing critical challenges of feature dimensionality and class imbalance. Leveraging the WUSTL-EHMS-2020 dataset&mdash;a specialized dataset specifically designed for IoMT cybersecurity research&mdash;this research systematically examines the performance of multiple neural network designs. Our research implements and evaluates five distinct ANN architectures: the Standard Feedforward Network, the Enhanced Channel ANN, Dual-Branch Addition and Concatenation ANNs, and the Shortcut Connection ANN. To mitigate the class imbalance challenge, we compare three balancing approaches: the Synthetic Minority Over-sampling Technique (SMOTE), Hybrid Over-Under Sampling, and the Weighted Cross-Entropy Loss Function. Performance analysis reveals nuanced insights across different architectures and balancing strategies. SMOTE-based models achieved average AUC scores ranging from 0.8491 to 0.8766. Hybrid sampling strategies improved performance, with AUC increasing to 0.8750. The weighted cross-entropy loss function demonstrated the most consistent performance. The most significant finding emerges from the Dual-Branch ANN with addition operations and a weighted loss function, which achieved 0.9403 Accuracy, 0.8786 AUC, a 0.8716 F1-Score, 0.8650 Precision, and 0.8786 Recall. 
Compared to the related work&rsquo;s baseline, it demonstrates a substantial increase in F1 Score by 8.45% and an improvement of 18.67% in AUC and Recall, highlighting the model&rsquo;s superiority at identifying potential security threats and minimizing false negatives.</p>
	]]></content:encoded>

	<dc:title>Optimizing IoMT Security: Performance Trade-Offs Between Neural Network Architectural Design, Dimensionality Reduction, and Class Imbalance Handling</dc:title>
			<dc:creator>Heyfa Ammar</dc:creator>
			<dc:creator>Asma Cherif</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040074</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-29</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>74</prism:startingPage>
		<prism:doi>10.3390/iot6040074</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/74</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/73">

	<title>IoT, Vol. 6, Pages 73: An IoT-Enabled Digital Twin Architecture with Feature-Optimized Transformer-Based Triage Classifier on a Cloud Platform</title>
	<link>https://www.mdpi.com/2624-831X/6/4/73</link>
	<description>It is essential to assign the correct triage level to patients as soon as they arrive in the emergency department in order to save lives, especially during peak demand. However, many healthcare systems estimate the triage levels by manual eyes-on evaluation, which can be inconsistent and time consuming. This study creates a full Digital Twin-based architecture for patient monitoring and automated triage level recommendation using IoT sensors, AI, and cloud-based services. The system can monitor all patients&amp;rsquo; vital signs through embedded sensors. The readings are used to update the Digital Twin instances that represent the present condition of the patients. This data is then used for triage prediction using a pretrained model that can predict the patients&amp;rsquo; triage levels. The training of the model utilized the synthetic minority over-sampling technique, combined with Tomek links to lessen the degree of data imbalance. Additionally, Lagrange element optimization was applied to select those features of the most informative nature. The final triage level is predicted using the Tabular Prior-Data Fitted Network, a transformer-based model tailored for tabular data classification. This combination achieved an overall accuracy of 87.27%. The proposed system demonstrates the potential of integrating digital twins and AI to improve decision support in emergency healthcare environments.</description>
	<pubDate>2025-11-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 73: An IoT-Enabled Digital Twin Architecture with Feature-Optimized Transformer-Based Triage Classifier on a Cloud Platform</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/73">doi: 10.3390/iot6040073</a></p>
	<p>Authors:
		Haider Q. Mutashar
		Hiba A. Abu-Alsaad
		Sawsan M. Mahmoud
		</p>
	<p>It is essential to assign the correct triage level to patients as soon as they arrive in the emergency department in order to save lives, especially during peak demand. However, many healthcare systems estimate the triage levels by manual eyes-on evaluation, which can be inconsistent and time consuming. This study creates a full Digital Twin-based architecture for patient monitoring and automated triage level recommendation using IoT sensors, AI, and cloud-based services. The system can monitor all patients&rsquo; vital signs through embedded sensors. The readings are used to update the Digital Twin instances that represent the present condition of the patients. This data is then used for triage prediction using a pretrained model that can predict the patients&rsquo; triage levels. The training of the model utilized the synthetic minority over-sampling technique, combined with Tomek links to lessen the degree of data imbalance. Additionally, Lagrange element optimization was applied to select those features of the most informative nature. The final triage level is predicted using the Tabular Prior-Data Fitted Network, a transformer-based model tailored for tabular data classification. This combination achieved an overall accuracy of 87.27%. The proposed system demonstrates the potential of integrating digital twins and AI to improve decision support in emergency healthcare environments.</p>
	]]></content:encoded>

	<dc:title>An IoT-Enabled Digital Twin Architecture with Feature-Optimized Transformer-Based Triage Classifier on a Cloud Platform</dc:title>
			<dc:creator>Haider Q. Mutashar</dc:creator>
			<dc:creator>Hiba A. Abu-Alsaad</dc:creator>
			<dc:creator>Sawsan M. Mahmoud</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040073</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-26</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-26</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>73</prism:startingPage>
		<prism:doi>10.3390/iot6040073</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/73</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/72">

	<title>IoT, Vol. 6, Pages 72: Multi-Flow Complex Event Optimization in the Edge: A Smart Street Scenario</title>
	<link>https://www.mdpi.com/2624-831X/6/4/72</link>
	<description>Internet of Things (IoT) devices can be used to provide safety, security, and other services that ensure that smart systems work as intended. However, the increasing complexity of the tasks is increasing the potential of performance loss when limited resources are not utilized appropriately. Distributed complex event processing (CEP) applications can be used to execute multiple unique tasks on sensor data. Since these operations can require a variety of data from multiple sensors across separate task steps, non-optimal code and data management can lead to increased processing delays. In this study, a mathematical model for optimizing critical path performance across multiple independent CEP flows is proposed. The model optimally assigns both where codes are executed at, as well as where their respective data should be placed at. The proposed solution is implemented within an open source library with the inclusion of operator placement heuristics from the literature. Approaches are tested within a realistic smart-street scenario. Consumer delays, algorithm runtimes, and delivery ratios within different time windows are reported. The results indicate that the proposed approach can reduce the delivery times for the critical CEP paths better than the heuristic solutions, with the downside of increased optimization runtimes.</description>
	<pubDate>2025-11-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 72: Multi-Flow Complex Event Optimization in the Edge: A Smart Street Scenario</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/72">doi: 10.3390/iot6040072</a></p>
	<p>Authors:
		Halit Uyanık
		Tolga Ovatman
		</p>
	<p>Internet of Things (IoT) devices can be used to provide safety, security, and other services that ensure that smart systems work as intended. However, the increasing complexity of the tasks is increasing the potential of performance loss when limited resources are not utilized appropriately. Distributed complex event processing (CEP) applications can be used to execute multiple unique tasks on sensor data. Since these operations can require a variety of data from multiple sensors across separate task steps, non-optimal code and data management can lead to increased processing delays. In this study, a mathematical model for optimizing critical path performance across multiple independent CEP flows is proposed. The model optimally assigns both where codes are executed at, as well as where their respective data should be placed at. The proposed solution is implemented within an open source library with the inclusion of operator placement heuristics from the literature. Approaches are tested within a realistic smart-street scenario. Consumer delays, algorithm runtimes, and delivery ratios within different time windows are reported. The results indicate that the proposed approach can reduce the delivery times for the critical CEP paths better than the heuristic solutions, with the downside of increased optimization runtimes.</p>
	]]></content:encoded>

	<dc:title>Multi-Flow Complex Event Optimization in the Edge: A Smart Street Scenario</dc:title>
			<dc:creator>Halit Uyanık</dc:creator>
			<dc:creator>Tolga Ovatman</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040072</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-21</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>72</prism:startingPage>
		<prism:doi>10.3390/iot6040072</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/72</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/71">

	<title>IoT, Vol. 6, Pages 71: A Machine Learning Approach to Detect Denial of Sleep Attacks in Internet of Things (IoT)</title>
	<link>https://www.mdpi.com/2624-831X/6/4/71</link>
	<description>The Internet of Things (IoT) has rapidly evolved into a central component of today&amp;rsquo;s technological landscape, enabling seamless connectivity and communication among a vast array of devices. It underpins automation, real-time monitoring, and smart infrastructure, serving as a foundation for Industry 4.0 and paving the way toward Industry 5.0. Despite the potential of IoT systems to transform industries, these systems face a number of challenges, most notably the lack of processing power, storage space, and battery life. Whereas cloud and fog computing help to relieve computational and storage constraints, energy limitations remain a severe impediment to long-term autonomous operation. Among the threats that exploit this weakness, the Denial-of-Sleep (DoSl) attack is particularly problematic because it prevents nodes from entering low-power states, leading to battery depletion and degraded network performance. This research investigates machine-learning (ML) and deep-learning (DL) methods for identifying such energy-wasting behaviors to protect IoT energy resources. A dataset was generated in a simulated IoT environment under multiple DoSl attack conditions to validate the proposed approach. Several ML and DL models were trained and tested on this data to discover distinctive power-consumption patterns related to the attacks. The experimental results confirm that the proposed models can effectively detect anomalous behaviors associated with DoSl activity, demonstrating their potential for energy-aware threat detection in IoT networks. Specifically, the Random Forest and Decision Tree classifiers achieved accuracies of 98.57% and 97.86%, respectively, on the held-out 25% test set, while the Long Short-Term Memory (LSTM) model reached 97.92% accuracy under a chronological split, confirming effective temporal generalization. 
All evaluations were conducted in a simulated environment, and the paper also outlines potential pathways for future physical testbed deployment.</description>
	<pubDate>2025-11-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 71: A Machine Learning Approach to Detect Denial of Sleep Attacks in Internet of Things (IoT)</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/71">doi: 10.3390/iot6040071</a></p>
	<p>Authors:
		Ishara Dissanayake
		Anuradhi Welhenge
		Hesiri Dhammika Weerasinghe
		</p>
	<p>The Internet of Things (IoT) has rapidly evolved into a central component of today&rsquo;s technological landscape, enabling seamless connectivity and communication among a vast array of devices. It underpins automation, real-time monitoring, and smart infrastructure, serving as a foundation for Industry 4.0 and paving the way toward Industry 5.0. Despite the potential of IoT systems to transform industries, these systems face a number of challenges, most notably the lack of processing power, storage space, and battery life. Whereas cloud and fog computing help to relieve computational and storage constraints, energy limitations remain a severe impediment to long-term autonomous operation. Among the threats that exploit this weakness, the Denial-of-Sleep (DoSl) attack is particularly problematic because it prevents nodes from entering low-power states, leading to battery depletion and degraded network performance. This research investigates machine-learning (ML) and deep-learning (DL) methods for identifying such energy-wasting behaviors to protect IoT energy resources. A dataset was generated in a simulated IoT environment under multiple DoSl attack conditions to validate the proposed approach. Several ML and DL models were trained and tested on this data to discover distinctive power-consumption patterns related to the attacks. The experimental results confirm that the proposed models can effectively detect anomalous behaviors associated with DoSl activity, demonstrating their potential for energy-aware threat detection in IoT networks. Specifically, the Random Forest and Decision Tree classifiers achieved accuracies of 98.57% and 97.86%, respectively, on the held-out 25% test set, while the Long Short-Term Memory (LSTM) model reached 97.92% accuracy under a chronological split, confirming effective temporal generalization. 
All evaluations were conducted in a simulated environment, and the paper also outlines potential pathways for future physical testbed deployment.</p>
	]]></content:encoded>

	<dc:title>A Machine Learning Approach to Detect Denial of Sleep Attacks in Internet of Things (IoT)</dc:title>
			<dc:creator>Ishara Dissanayake</dc:creator>
			<dc:creator>Anuradhi Welhenge</dc:creator>
			<dc:creator>Hesiri Dhammika Weerasinghe</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040071</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-20</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-20</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>71</prism:startingPage>
		<prism:doi>10.3390/iot6040071</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/71</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/70">

	<title>IoT, Vol. 6, Pages 70: Lightweight Quantized XGBoost for Botnet Detection in Resource-Constrained IoT Networks</title>
	<link>https://www.mdpi.com/2624-831X/6/4/70</link>
	<description>The rapid expansion of IoT devices has introduced significant security challenges, with malware authors constantly evolving their techniques to exploit vulnerabilities in IoT networks. Despite this growing threat, progress in developing effective detection solutions remains limited. In this study, we present an ML-based framework for detecting and classifying network threats targeting IoT environments. Using the CTU-IoT-Malware-Capture 2023 dataset and the UNSW Bot-IoT dataset, we transformed the task into a structured multi-class classification problem to better reflect real-world detection challenges. Our primary contribution lies in demonstrating the effectiveness of post-training quantization on gradient-boosted models, specifically a Quantized XGB variant enhanced with histogram-based quantization. This approach significantly reduces model size and inference time without sacrificing accuracy. The proposed model achieved high classification accuracies of 99.93% and 99.99% on the two datasets, while the quantization step led to 1.42&amp;times; and 3&amp;times; improvements in inference speed, and reductions in model size by 3.61&amp;times; and 2.71&amp;times;, respectively, making it well-suited for deployment in resource-constrained IoT settings. This work demonstrates not only the effectiveness of gradient boosting in handling complex traffic data but also introduces an efficient optimization strategy for real-time IoT threat detection.</description>
	<pubDate>2025-11-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 70: Lightweight Quantized XGBoost for Botnet Detection in Resource-Constrained IoT Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/70">doi: 10.3390/iot6040070</a></p>
	<p>Authors:
		Mohammed Rauf Ali Khan
		Abdulaziz Y. Barnawi
		Adnan Munir
		Zainab Alsalman
		Dario Marcelo Satan Sanunga
		</p>
	<p>The rapid expansion of IoT devices has introduced significant security challenges, with malware authors constantly evolving their techniques to exploit vulnerabilities in IoT networks. Despite this growing threat, progress in developing effective detection solutions remains limited. In this study, we present an ML-based framework for detecting and classifying network threats targeting IoT environments. Using the CTU-IoT-Malware-Capture 2023 dataset and the UNSW Bot-IoT dataset, we transformed the task into a structured multi-class classification problem to better reflect real-world detection challenges. Our primary contribution lies in demonstrating the effectiveness of post-training quantization on gradient-boosted models, specifically a Quantized XGB variant enhanced with histogram-based quantization. This approach significantly reduces model size and inference time without sacrificing accuracy. The proposed model achieved high classification accuracies of 99.93% and 99.99% on the two datasets, while the quantization step led to 1.42&times; and 3&times; improvements in inference speed, and reductions in model size by 3.61&times; and 2.71&times;, respectively, making it well-suited for deployment in resource-constrained IoT settings. This work demonstrates not only the effectiveness of gradient boosting in handling complex traffic data but also introduces an efficient optimization strategy for real-time IoT threat detection.</p>
	]]></content:encoded>

	<dc:title>Lightweight Quantized XGBoost for Botnet Detection in Resource-Constrained IoT Networks</dc:title>
			<dc:creator>Mohammed Rauf Ali Khan</dc:creator>
			<dc:creator>Abdulaziz Y. Barnawi</dc:creator>
			<dc:creator>Adnan Munir</dc:creator>
			<dc:creator>Zainab Alsalman</dc:creator>
			<dc:creator>Dario Marcelo Satan Sanunga</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040070</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-18</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-18</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>70</prism:startingPage>
		<prism:doi>10.3390/iot6040070</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/70</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/69">

	<title>IoT, Vol. 6, Pages 69: Modular IoT Architecture for Monitoring and Control of Office Environments Based on Home Assistant</title>
	<link>https://www.mdpi.com/2624-831X/6/4/69</link>
	<description>Cloud-centric IoT frameworks remain dominant; however, they introduce major challenges related to data privacy, latency, and system resilience. Existing open-source solutions often lack standardized principles for scalable, local-first deployment and do not adequately integrate fault tolerance with hybrid automation logic. This study presents a practical and extensible local-first IoT architecture designed for full operational autonomy using open-source components. The proposed system features a modular, layered design that includes device, communication, data, management, service, security, and presentation layers. It integrates MQTT, Zigbee, REST, and WebSocket protocols to enable reliable publish&amp;ndash;subscribe and request&amp;ndash;response communication among heterogeneous devices. A hybrid automation model combines rule-based logic with lightweight data-driven routines for context-aware decision-making. The implementation uses Proxmox-based virtualization with Home Assistant as the core automation engine and operates entirely offline, ensuring privacy and continuity without cloud dependency. The architecture was deployed in a real-world office environment and evaluated under workload and fault-injection scenarios. Results demonstrate stable operation with MQTT throughput exceeding 360,000 messages without packet loss, automatic recovery from simulated failures within three minutes, and energy savings of approximately 28% compared to baseline manual control. Compared to established frameworks such as FIWARE and IoT-A, the proposed approach achieves enhanced modularity, local autonomy, and hybrid control capabilities, offering a reproducible model for privacy-sensitive smart environments.</description>
	<pubDate>2025-11-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 69: Modular IoT Architecture for Monitoring and Control of Office Environments Based on Home Assistant</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/69">doi: 10.3390/iot6040069</a></p>
	<p>Authors:
		Yevheniy Khomenko
		Sergii Babichev
		</p>
	<p>Cloud-centric IoT frameworks remain dominant; however, they introduce major challenges related to data privacy, latency, and system resilience. Existing open-source solutions often lack standardized principles for scalable, local-first deployment and do not adequately integrate fault tolerance with hybrid automation logic. This study presents a practical and extensible local-first IoT architecture designed for full operational autonomy using open-source components. The proposed system features a modular, layered design that includes device, communication, data, management, service, security, and presentation layers. It integrates MQTT, Zigbee, REST, and WebSocket protocols to enable reliable publish&ndash;subscribe and request&ndash;response communication among heterogeneous devices. A hybrid automation model combines rule-based logic with lightweight data-driven routines for context-aware decision-making. The implementation uses Proxmox-based virtualization with Home Assistant as the core automation engine and operates entirely offline, ensuring privacy and continuity without cloud dependency. The architecture was deployed in a real-world office environment and evaluated under workload and fault-injection scenarios. Results demonstrate stable operation with MQTT throughput exceeding 360,000 messages without packet loss, automatic recovery from simulated failures within three minutes, and energy savings of approximately 28% compared to baseline manual control. Compared to established frameworks such as FIWARE and IoT-A, the proposed approach achieves enhanced modularity, local autonomy, and hybrid control capabilities, offering a reproducible model for privacy-sensitive smart environments.</p>
	]]></content:encoded>

	<dc:title>Modular IoT Architecture for Monitoring and Control of Office Environments Based on Home Assistant</dc:title>
			<dc:creator>Yevheniy Khomenko</dc:creator>
			<dc:creator>Sergii Babichev</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040069</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-17</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-17</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>69</prism:startingPage>
		<prism:doi>10.3390/iot6040069</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/69</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/68">

	<title>IoT, Vol. 6, Pages 68: WireDepth: IoT-Enabled Multi-Sensor Depth Monitoring for Precision Subsoiling in Sugarcane</title>
	<link>https://www.mdpi.com/2624-831X/6/4/68</link>
	<description>Subsoil compaction is a major constraint in sugarcane production, limiting yields and reducing resource-use efficiency. This study presents WireDepth, an innovative cloud-connected monitoring system that leverages edge computing and IoT technologies for real-time, spatially aware analysis and visualization of subsoiling depth. The system integrates ultrasonic, laser, inclinometer, and potentiometer sensors mounted on the subsoiler, with on-board microcontroller processing and dual wireless connectivity (LoRaWAN and NB-IoT/LTE-M) for robust data transmission. A cloud platform delivers advanced analytics, including 3D depth maps and operational efficiency metrics. System accuracy was assessed using 300 reference depth measurements, with Root Mean Square Error (RMSE) and Mean Absolute Percentage Error (MAPE) calculated per sensor. The inclinometer and potentiometer achieved the highest accuracy (MAPE of 0.92% and 0.84%, respectively), with no significant deviation from field measurements (paired t-tests, p &gt; 0.05). Ultrasonic and laser sensors exhibited higher errors, particularly at shallow depths, due to soil debris interference. Correlation analysis confirmed a significant effect of depth on sensor accuracy, with laser sensors showing the strongest association (Pearson r = 0.457, p &lt; 0.001). Field validation in commercial sugarcane fields demonstrated that WireDepth improves subsoiling precision, reduces energy waste, and supports sustainable production by enhancing soil structure and root development. These findings advance precision agriculture by offering a scalable, real-time solution for subsoiling management, with broad implications for yield improvement in compaction-affected systems.</description>
	<pubDate>2025-11-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 68: WireDepth: IoT-Enabled Multi-Sensor Depth Monitoring for Precision Subsoiling in Sugarcane</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/68">doi: 10.3390/iot6040068</a></p>
	<p>Authors:
		Saman Abdanan Mehdizadeh
		Aghajan Bahadori
		Manocheher Ebadian
		Mohammad Hasan Sadeghian
		Mansour Nasr Esfahani
		Yiannis Ampatzidis
		</p>
	<p>Subsoil compaction is a major constraint in sugarcane production, limiting yields and reducing resource-use efficiency. This study presents WireDepth, an innovative cloud-connected monitoring system that leverages edge computing and IoT technologies for real-time, spatially aware analysis and visualization of subsoiling depth. The system integrates ultrasonic, laser, inclinometer, and potentiometer sensors mounted on the subsoiler, with on-board microcontroller processing and dual wireless connectivity (LoRaWAN and NB-IoT/LTE-M) for robust data transmission. A cloud platform delivers advanced analytics, including 3D depth maps and operational efficiency metrics. System accuracy was assessed using 300 reference depth measurements, with Root Mean Square Error (RMSE) and Mean Absolute Percentage Error (MAPE) calculated per sensor. The inclinometer and potentiometer achieved the highest accuracy (MAPE of 0.92% and 0.84%, respectively), with no significant deviation from field measurements (paired t-tests, p &gt; 0.05). Ultrasonic and laser sensors exhibited higher errors, particularly at shallow depths, due to soil debris interference. Correlation analysis confirmed a significant effect of depth on sensor accuracy, with laser sensors showing the strongest association (Pearson r = 0.457, p &lt; 0.001). Field validation in commercial sugarcane fields demonstrated that WireDepth improves subsoiling precision, reduces energy waste, and supports sustainable production by enhancing soil structure and root development. These findings advance precision agriculture by offering a scalable, real-time solution for subsoiling management, with broad implications for yield improvement in compaction-affected systems.</p>
	]]></content:encoded>

	<dc:title>WireDepth: IoT-Enabled Multi-Sensor Depth Monitoring for Precision Subsoiling in Sugarcane</dc:title>
			<dc:creator>Saman Abdanan Mehdizadeh</dc:creator>
			<dc:creator>Aghajan Bahadori</dc:creator>
			<dc:creator>Manocheher Ebadian</dc:creator>
			<dc:creator>Mohammad Hasan Sadeghian</dc:creator>
			<dc:creator>Mansour Nasr Esfahani</dc:creator>
			<dc:creator>Yiannis Ampatzidis</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040068</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-14</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-14</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>68</prism:startingPage>
		<prism:doi>10.3390/iot6040068</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/68</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/67">

	<title>IoT, Vol. 6, Pages 67: Centralized Two-Tiered Tree-Based Intrusion-Detection System (C2T-IDS)</title>
	<link>https://www.mdpi.com/2624-831X/6/4/67</link>
	<description>The exponential growth of Internet of Things (IoT) devices introduces significant security challenges due to their resource constraints and diverse attack surfaces. To address these issues, this paper proposes the Centralized Two-Tiered Tree-Based Intrusion Detection System (C2T-IDS), a lightweight framework designed for efficient and scalable threat detection in IoT networks. The system employs a hybrid edge-centralized architecture, where the first tier, deployed on edge gateways, performs real-time binary classification to detect anomalous traffic using optimized tree-based models. The second tier, hosted on a centralized server, conducts detailed multi-class classification to diagnose specific attack types using advanced ensemble methods. Evaluated on the realistic CIC-IoT-2023 dataset, C2T-IDS achieves a Macro F1-Score of up to 0.94 in detection and 0.80 in diagnosis, outperforming direct multi-class classification by 5–15%. With inference times as low as 6 milliseconds on edge devices, the framework demonstrates a practical balance between accuracy, efficiency, and deployability, offering a robust solution for securing resource-constrained IoT environments.</description>
	<pubDate>2025-11-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 67: Centralized Two-Tiered Tree-Based Intrusion-Detection System (C2T-IDS)</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/67">doi: 10.3390/iot6040067</a></p>
	<p>Authors:
		Hisham Abdul Karim Yassine
		Mohammed El Saleh
		Bilal Ezzeddine Nakhal
		Abdallah El Chakik
		</p>
	<p>The exponential growth of Internet of Things (IoT) devices introduces significant security challenges due to their resource constraints and diverse attack surfaces. To address these issues, this paper proposes the Centralized Two-Tiered Tree-Based Intrusion Detection System (C2T-IDS), a lightweight framework designed for efficient and scalable threat detection in IoT networks. The system employs a hybrid edge-centralized architecture, where the first tier, deployed on edge gateways, performs real-time binary classification to detect anomalous traffic using optimized tree-based models. The second tier, hosted on a centralized server, conducts detailed multi-class classification to diagnose specific attack types using advanced ensemble methods. Evaluated on the realistic CIC-IoT-2023 dataset, C2T-IDS achieves a Macro F1-Score of up to 0.94 in detection and 0.80 in diagnosis, outperforming direct multi-class classification by 5–15%. With inference times as low as 6 milliseconds on edge devices, the framework demonstrates a practical balance between accuracy, efficiency, and deployability, offering a robust solution for securing resource-constrained IoT environments.</p>
	]]></content:encoded>

	<dc:title>Centralized Two-Tiered Tree-Based Intrusion-Detection System (C2T-IDS)</dc:title>
			<dc:creator>Hisham Abdul Karim Yassine</dc:creator>
			<dc:creator>Mohammed El Saleh</dc:creator>
			<dc:creator>Bilal Ezzeddine Nakhal</dc:creator>
			<dc:creator>Abdallah El Chakik</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040067</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-11-05</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-11-05</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>67</prism:startingPage>
		<prism:doi>10.3390/iot6040067</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/67</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/66">

	<title>IoT, Vol. 6, Pages 66: A Systematic Review for Ammonia Monitoring Systems Based on the Internet of Things</title>
	<link>https://www.mdpi.com/2624-831X/6/4/66</link>
	<description>Ammonia is a gas primarily produced for use in agriculture, refrigeration systems, chemical manufacturing, and power generation. Despite its benefits, improper management of ammonia poses significant risks to human health and the environment. Consequently, monitoring ammonia is essential for enhancing industrial safety and preventing leaks that can lead to environmental contamination. Given the abundance and diversity of studies on Internet of Things (IoT) systems for gas detection, the main objective of this paper is to systematically review the literature to identify emerging research trends and opportunities. This review follows the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) methodology, focusing on sensor technologies, microcontrollers, communication technologies, IoT platforms, and applications. The main findings indicate that most studies employed sensors from the MQ family (particularly the MQ-135 and MQ-137), microcontrollers based on the Xtensa architecture (ESP32 and ESP8266) and ARM Cortex-A processors (Raspberry Pi 3B+/4), with Wi-Fi as the predominant communication technology, and Blynk and ThingSpeak as the primary cloud-based IoT platforms. The most frequent applications were agriculture and environmental monitoring. These findings highlight the growing maturity of IoT technologies in ammonia sensing, while also addressing challenges like sensor reliability, energy efficiency, and development of integrated solutions with Artificial Intelligence.</description>
	<pubDate>2025-10-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 66: A Systematic Review for Ammonia Monitoring Systems Based on the Internet of Things</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/66">doi: 10.3390/iot6040066</a></p>
	<p>Authors:
		Adriel Henrique Monte Claro da Silva
		Mikaelle Karoline da Silva
		Augusto Santos
		Luis Arturo Gómez-Malagón
		</p>
	<p>Ammonia is a gas primarily produced for use in agriculture, refrigeration systems, chemical manufacturing, and power generation. Despite its benefits, improper management of ammonia poses significant risks to human health and the environment. Consequently, monitoring ammonia is essential for enhancing industrial safety and preventing leaks that can lead to environmental contamination. Given the abundance and diversity of studies on Internet of Things (IoT) systems for gas detection, the main objective of this paper is to systematically review the literature to identify emerging research trends and opportunities. This review follows the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) methodology, focusing on sensor technologies, microcontrollers, communication technologies, IoT platforms, and applications. The main findings indicate that most studies employed sensors from the MQ family (particularly the MQ-135 and MQ-137), microcontrollers based on the Xtensa architecture (ESP32 and ESP8266) and ARM Cortex-A processors (Raspberry Pi 3B+/4), with Wi-Fi as the predominant communication technology, and Blynk and ThingSpeak as the primary cloud-based IoT platforms. The most frequent applications were agriculture and environmental monitoring. These findings highlight the growing maturity of IoT technologies in ammonia sensing, while also addressing challenges like sensor reliability, energy efficiency, and development of integrated solutions with Artificial Intelligence.</p>
	]]></content:encoded>

	<dc:title>A Systematic Review for Ammonia Monitoring Systems Based on the Internet of Things</dc:title>
			<dc:creator>Adriel Henrique Monte Claro da Silva</dc:creator>
			<dc:creator>Mikaelle Karoline da Silva</dc:creator>
			<dc:creator>Augusto Santos</dc:creator>
			<dc:creator>Luis Arturo Gómez-Malagón</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040066</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-30</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-30</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>66</prism:startingPage>
		<prism:doi>10.3390/iot6040066</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/66</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/65">

	<title>IoT, Vol. 6, Pages 65: Blockchain for Secure IoT: A Review of Identity Management, Access Control, and Trust Mechanisms</title>
	<link>https://www.mdpi.com/2624-831X/6/4/65</link>
	<description>Blockchain technologies offer transformative potential in terms of addressing the security, trust, and identity management issues that exist in large-scale Internet of Things (IoT) deployments. This narrative review provides a comprehensive survey of various studies, focusing on decentralized identity management, trust mechanisms, smart contracts, privacy preservation, and real-world IoT applications. According to the literature, blockchain-based solutions provide robust authentication through mechanisms such as Physical Unclonable Functions (PUFs), enhance transparency via smart contract-enabled reputation systems, and significantly mitigate vulnerabilities, including single points of failure and Sybil attacks. Smart contracts enable secure interactions by automating resource allocation, access control, and verification. Cryptographic tools, including zero-knowledge proofs (ZKPs), proxy re-encryption, and Merkle trees, further improve data privacy and device integrity. Despite these advantages, challenges persist in areas such as scalability, regulatory and compliance issues, privacy and security concerns, resource constraints, and interoperability. By reviewing the current state-of-the-art literature, this review emphasizes the importance of establishing standardized protocols, performance benchmarks, and robust regulatory frameworks to achieve scalable and secure blockchain-integrated IoT solutions, and provides emerging trends and future research directions for the integration of blockchain technology into the IoT ecosystem.</description>
	<pubDate>2025-10-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 65: Blockchain for Secure IoT: A Review of Identity Management, Access Control, and Trust Mechanisms</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/65">doi: 10.3390/iot6040065</a></p>
	<p>Authors:
		Behnam Khayer
		Siamak Mirzaei
		Hooman Alavizadeh
		Ahmad Salehi Shahraki
		</p>
	<p>Blockchain technologies offer transformative potential in terms of addressing the security, trust, and identity management issues that exist in large-scale Internet of Things (IoT) deployments. This narrative review provides a comprehensive survey of various studies, focusing on decentralized identity management, trust mechanisms, smart contracts, privacy preservation, and real-world IoT applications. According to the literature, blockchain-based solutions provide robust authentication through mechanisms such as Physical Unclonable Functions (PUFs), enhance transparency via smart contract-enabled reputation systems, and significantly mitigate vulnerabilities, including single points of failure and Sybil attacks. Smart contracts enable secure interactions by automating resource allocation, access control, and verification. Cryptographic tools, including zero-knowledge proofs (ZKPs), proxy re-encryption, and Merkle trees, further improve data privacy and device integrity. Despite these advantages, challenges persist in areas such as scalability, regulatory and compliance issues, privacy and security concerns, resource constraints, and interoperability. By reviewing the current state-of-the-art literature, this review emphasizes the importance of establishing standardized protocols, performance benchmarks, and robust regulatory frameworks to achieve scalable and secure blockchain-integrated IoT solutions, and provides emerging trends and future research directions for the integration of blockchain technology into the IoT ecosystem.</p>
	]]></content:encoded>

	<dc:title>Blockchain for Secure IoT: A Review of Identity Management, Access Control, and Trust Mechanisms</dc:title>
			<dc:creator>Behnam Khayer</dc:creator>
			<dc:creator>Siamak Mirzaei</dc:creator>
			<dc:creator>Hooman Alavizadeh</dc:creator>
			<dc:creator>Ahmad Salehi Shahraki</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040065</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-28</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-28</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>65</prism:startingPage>
		<prism:doi>10.3390/iot6040065</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/65</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/64">

	<title>IoT, Vol. 6, Pages 64: Compost Monitoring System for Kitchen Waste Management: Development, Deployment and Analysis</title>
	<link>https://www.mdpi.com/2624-831X/6/4/64</link>
	<description>Composting can be perceived as an art and science of converting organic waste into a rich and nutritious soil amendment—compost. The existing literature talks about how and what parameters need to be monitored in the process of composting and what actions are to be taken to optimize the process. In this paper, the development, deployment and data analytics of a compost monitoring system are presented, wherein not only the parameters to be measured but also the topology, mechanical design and battery operation details, which are crucial for the deployment of the system, are considered. Having realized that the temperature plays an important role in the process of composting, a contactless method of monitoring the compost temperature, using thermal imaging, has been investigated. Results showing the screenshots of the successfully developed system, plots of the obtained data and the inferences drawn from them are presented. This work not only contributes to the composting data, which is scarce, but also brings out the advantages of using thermal images in addition to temperature sensor probes.</description>
	<pubDate>2025-10-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 64: Compost Monitoring System for Kitchen Waste Management: Development, Deployment and Analysis</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/64">doi: 10.3390/iot6040064</a></p>
	<p>Authors:
		Sasirekha Gurla Venkata Kameswari
		Arun Basavaraju
		Chandrashekhar Siva Kumar
		Jyotsna Bapat
		</p>
	<p>Composting can be perceived as an art and science of converting organic waste into a rich and nutritious soil amendment—compost. The existing literature talks about how and what parameters need to be monitored in the process of composting and what actions are to be taken to optimize the process. In this paper, the development, deployment and data analytics of a compost monitoring system are presented, wherein not only the parameters to be measured but also the topology, mechanical design and battery operation details, which are crucial for the deployment of the system, are considered. Having realized that the temperature plays an important role in the process of composting, a contactless method of monitoring the compost temperature, using thermal imaging, has been investigated. Results showing the screenshots of the successfully developed system, plots of the obtained data and the inferences drawn from them are presented. This work not only contributes to the composting data, which is scarce, but also brings out the advantages of using thermal images in addition to temperature sensor probes.</p>
	]]></content:encoded>

	<dc:title>Compost Monitoring System for Kitchen Waste Management: Development, Deployment and Analysis</dc:title>
			<dc:creator>Sasirekha Gurla Venkata Kameswari</dc:creator>
			<dc:creator>Arun Basavaraju</dc:creator>
			<dc:creator>Chandrashekhar Siva Kumar</dc:creator>
			<dc:creator>Jyotsna Bapat</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040064</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-27</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-27</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>64</prism:startingPage>
		<prism:doi>10.3390/iot6040064</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/64</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/63">

	<title>IoT, Vol. 6, Pages 63: Case-Based Data Quality Management for IoT Logs: A Case Study Focusing on Detection of Data Quality Issues</title>
	<link>https://www.mdpi.com/2624-831X/6/4/63</link>
	<description>Smart manufacturing applications increasingly rely on time-series data from Industrial IoT sensors, yet these data streams often contain data quality issues (DQIs) that affect analysis and disrupt production. While traditional Machine Learning methods are difficult to apply due to the small amount of data available, the knowledge-based approach of Case-Based Reasoning (CBR) offers a way to reuse previously gained experience. We introduce the first end-to-end Case-Based Reasoning (CBR) framework that both detects and remedies DQIs in near real time, even when only a handful of annotated fault instances are available. Our solution encodes expert experience in the four CBR knowledge containers: (i) a vocabulary that represents sensor streams and their context in the DataStream format; (ii) a case base populated with fault-annotated event logs; (iii) tailored similarity measures—including a weighted Dynamic Time Warping variant and structure-aware list mapping—that isolate the signatures of missing-value, missing-sensor, and time-shift errors; and (iv) lightweight adaptation rules that recommend concrete repair actions or, where appropriate, invoke automated imputation and alignment routines. A case study is used to examine and present the suitability of the approach for a specific application domain. Although the case study demonstrates only limited capabilities in identifying Data Quality Issues (DQIs), we aim to support transparent evaluation and future research by publishing (1) a prototype of the Case-Based Reasoning (CBR) system and (2) a publicly accessible, meticulously annotated sensor-log benchmark. Together, these resources provide a reproducible baseline and a modular foundation for advancing similarity metrics, expanding the DQI taxonomy, and enabling knowledge-intensive reasoning in IoT data quality management.</description>
	<pubDate>2025-10-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 63: Case-Based Data Quality Management for IoT Logs: A Case Study Focusing on Detection of Data Quality Issues</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/63">doi: 10.3390/iot6040063</a></p>
	<p>Authors:
		Alexander Schultheis
		Yannis Bertrand
		Joscha Grüger
		Lukas Malburg
		Ralph Bergmann
		Estefanía Serral Asensio
		</p>
	<p>Smart manufacturing applications increasingly rely on time-series data from Industrial IoT sensors, yet these data streams often contain data quality issues (DQIs) that affect analysis and disrupt production. While traditional Machine Learning methods are difficult to apply due to the small amount of data available, the knowledge-based approach of Case-Based Reasoning (CBR) offers a way to reuse previously gained experience. We introduce the first end-to-end Case-Based Reasoning (CBR) framework that both detects and remedies DQIs in near real time, even when only a handful of annotated fault instances are available. Our solution encodes expert experience in the four CBR knowledge containers: (i) a vocabulary that represents sensor streams and their context in the DataStream format; (ii) a case base populated with fault-annotated event logs; (iii) tailored similarity measures—including a weighted Dynamic Time Warping variant and structure-aware list mapping—that isolate the signatures of missing-value, missing-sensor, and time-shift errors; and (iv) lightweight adaptation rules that recommend concrete repair actions or, where appropriate, invoke automated imputation and alignment routines. A case study is used to examine and present the suitability of the approach for a specific application domain. Although the case study demonstrates only limited capabilities in identifying Data Quality Issues (DQIs), we aim to support transparent evaluation and future research by publishing (1) a prototype of the Case-Based Reasoning (CBR) system and (2) a publicly accessible, meticulously annotated sensor-log benchmark. Together, these resources provide a reproducible baseline and a modular foundation for advancing similarity metrics, expanding the DQI taxonomy, and enabling knowledge-intensive reasoning in IoT data quality management.</p>
	]]></content:encoded>

	<dc:title>Case-Based Data Quality Management for IoT Logs: A Case Study Focusing on Detection of Data Quality Issues</dc:title>
			<dc:creator>Alexander Schultheis</dc:creator>
			<dc:creator>Yannis Bertrand</dc:creator>
			<dc:creator>Joscha Grüger</dc:creator>
			<dc:creator>Lukas Malburg</dc:creator>
			<dc:creator>Ralph Bergmann</dc:creator>
			<dc:creator>Estefanía Serral Asensio</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040063</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-23</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-23</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>63</prism:startingPage>
		<prism:doi>10.3390/iot6040063</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/63</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/62">

	<title>IoT, Vol. 6, Pages 62: Toward Scalable and Sustainable Detection Systems: A Behavioural Taxonomy and Utility-Based Framework for Security Detection in IoT and IIoT</title>
	<link>https://www.mdpi.com/2624-831X/6/4/62</link>
	<description>Resource-constrained IoT and IIoT systems require detection architectures that balance accuracy with energy efficiency, scalability, and contextual awareness. This paper presents a conceptual framework informed by a systematic review of energy-aware detection systems (XDS), unifying intrusion and anomaly detection systems (IDS and ADS) within a single framework. The proposed taxonomy captures six key dimensions: energy-awareness, adaptivity, modularity, offloading support, domain scope, and attack coverage. Applying this framework to the recent literature reveals recurring limitations, including static architectures, limited runtime coordination, and narrow evaluation settings. To address these challenges, we introduce a utility-based decision model for multi-layer task placement, guided by operational metrics such as energy cost, latency, and detection complexity. Unlike review-only studies, this work contributes both a synthesis of current limitations and the design of a novel six-dimensional taxonomy and utility-based layered architecture. The study concludes with future directions that support the development of adaptable, sustainable, and context-aware XDS architectures for heterogeneous environments.</description>
	<pubDate>2025-10-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 62: Toward Scalable and Sustainable Detection Systems: A Behavioural Taxonomy and Utility-Based Framework for Security Detection in IoT and IIoT</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/62">doi: 10.3390/iot6040062</a></p>
	<p>Authors:
		Ali Jaddoa
		Hasanein Alharbi
		Abbas Hommadi
		Hussein A. Ismael
		</p>
	<p>Resource-constrained IoT and IIoT systems require detection architectures that balance accuracy with energy efficiency, scalability, and contextual awareness. This paper presents a conceptual framework informed by a systematic review of energy-aware detection systems (XDS), unifying intrusion and anomaly detection systems (IDS and ADS) within a single framework. The proposed taxonomy captures six key dimensions: energy-awareness, adaptivity, modularity, offloading support, domain scope, and attack coverage. Applying this framework to the recent literature reveals recurring limitations, including static architectures, limited runtime coordination, and narrow evaluation settings. To address these challenges, we introduce a utility-based decision model for multi-layer task placement, guided by operational metrics such as energy cost, latency, and detection complexity. Unlike review-only studies, this work contributes both a synthesis of current limitations and the design of a novel six-dimensional taxonomy and utility-based layered architecture. The study concludes with future directions that support the development of adaptable, sustainable, and context-aware XDS architectures for heterogeneous environments.</p>
	]]></content:encoded>

	<dc:title>Toward Scalable and Sustainable Detection Systems: A Behavioural Taxonomy and Utility-Based Framework for Security Detection in IoT and IIoT</dc:title>
			<dc:creator>Ali Jaddoa</dc:creator>
			<dc:creator>Hasanein Alharbi</dc:creator>
			<dc:creator>Abbas Hommadi</dc:creator>
			<dc:creator>Hussein A. Ismael</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040062</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-21</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>62</prism:startingPage>
		<prism:doi>10.3390/iot6040062</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/62</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/61">

	<title>IoT, Vol. 6, Pages 61: A Comprehensive Review of Cybersecurity Threats to Wireless Infocommunications in the Quantum-Age Cryptography</title>
	<link>https://www.mdpi.com/2624-831X/6/4/61</link>
	<description>The dynamic growth in the dependence of numerous industrial sectors, businesses, and critical infrastructure on infocommunication technologies necessitates the enhancement of their resilience to cyberattacks and radio-frequency threats. This article addresses a relevant scientific and applied issue, which is to formulate prospective directions for improving the effectiveness of cybersecurity approaches for infocommunication networks through a comparative analysis and logical synthesis of the state-of-the-art of applied research on cyber threats to the information security of mobile and satellite networks, including those related to the rapid development of quantum computing technologies. The article presents results on the systematisation of cyberattacks at the physical, signalling and cryptographic levels, as well as threats to cryptographic protocols and authentication systems. Particular attention is given to the prospects for implementing post-quantum cryptography, hybrid cryptographic models and the integration of threat detection mechanisms based on machine learning and artificial intelligence algorithms. The article proposes a classification of current threats according to architectural levels, analyses typical protocol vulnerabilities in next-generation mobile networks and satellite communications, and identifies key research gaps in existing cybersecurity approaches. Based on a critical analysis of scientific and applied literature, this article identifies key areas for future research. These include developing lightweight cryptographic algorithms, standardising post-quantum cryptographic models, creating adaptive cybersecurity frameworks and optimising protection mechanisms for resource-constrained devices within information and digital networks.</description>
	<pubDate>2025-10-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 61: A Comprehensive Review of Cybersecurity Threats to Wireless Infocommunications in the Quantum-Age Cryptography</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/61">doi: 10.3390/iot6040061</a></p>
	<p>Authors:
		Ivan Laktionov
		Grygorii Diachenko
		Dmytro Moroz
		Iryna Getman
		</p>
	<p>The dynamic growth in the dependence of numerous industrial sectors, businesses, and critical infrastructure on infocommunication technologies necessitates the enhancement of their resilience to cyberattacks and radio-frequency threats. This article addresses a relevant scientific and applied issue, which is to formulate prospective directions for improving the effectiveness of cybersecurity approaches for infocommunication networks through a comparative analysis and logical synthesis of the state-of-the-art of applied research on cyber threats to the information security of mobile and satellite networks, including those related to the rapid development of quantum computing technologies. The article presents results on the systematisation of cyberattacks at the physical, signalling and cryptographic levels, as well as threats to cryptographic protocols and authentication systems. Particular attention is given to the prospects for implementing post-quantum cryptography, hybrid cryptographic models and the integration of threat detection mechanisms based on machine learning and artificial intelligence algorithms. The article proposes a classification of current threats according to architectural levels, analyses typical protocol vulnerabilities in next-generation mobile networks and satellite communications, and identifies key research gaps in existing cybersecurity approaches. Based on a critical analysis of scientific and applied literature, this article identifies key areas for future research. These include developing lightweight cryptographic algorithms, standardising post-quantum cryptographic models, creating adaptive cybersecurity frameworks and optimising protection mechanisms for resource-constrained devices within information and digital networks.</p>
	]]></content:encoded>

	<dc:title>A Comprehensive Review of Cybersecurity Threats to Wireless Infocommunications in the Quantum-Age Cryptography</dc:title>
			<dc:creator>Ivan Laktionov</dc:creator>
			<dc:creator>Grygorii Diachenko</dc:creator>
			<dc:creator>Dmytro Moroz</dc:creator>
			<dc:creator>Iryna Getman</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040061</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-16</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-16</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>61</prism:startingPage>
		<prism:doi>10.3390/iot6040061</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/61</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/60">

	<title>IoT, Vol. 6, Pages 60: BiLSTM-Based Fault Anticipation for Predictive Activation of FRER in Time-Sensitive Industrial Networks</title>
	<link>https://www.mdpi.com/2624-831X/6/4/60</link>
	<description>Frame Replication and Elimination for Reliability (FRER) in Time-Sensitive Networking (TSN) enhances fault tolerance by duplicating critical traffic across disjoint paths. However, always-on FRER configurations introduce persistent redundancy overhead, even under nominal network conditions. This paper proposes a predictive FRER activation framework that anticipates faults using a Key Performance Indicator (KPI)-driven bidirectional Long Short-Term Memory (BiLSTM) model. By continuously analyzing multivariate KPIs&amp;mdash;such as latency, jitter, and retransmission rates&amp;mdash;the model forecasts potential faults and proactively activates FRER. Redundancy is deactivated upon KPI recovery or after a defined minimum protection window, thereby reducing bandwidth usage without compromising reliability. The framework includes a Python-based simulation environment, a real-time visualization dashboard built with Streamlit, and a fully integrated runtime controller. The experimental results demonstrate substantial improvements in link utilization while preserving fault protection, highlighting the effectiveness of anticipatory redundancy strategies in industrial TSN environments.</description>
	<pubDate>2025-10-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 60: BiLSTM-Based Fault Anticipation for Predictive Activation of FRER in Time-Sensitive Industrial Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/60">doi: 10.3390/iot6040060</a></p>
	<p>Authors:
		Mohamed Seliem
		Utz Roedig
		Cormac Sreenan
		Dirk Pesch
		</p>
	<p>Frame Replication and Elimination for Reliability (FRER) in Time-Sensitive Networking (TSN) enhances fault tolerance by duplicating critical traffic across disjoint paths. However, always-on FRER configurations introduce persistent redundancy overhead, even under nominal network conditions. This paper proposes a predictive FRER activation framework that anticipates faults using a Key Performance Indicator (KPI)-driven bidirectional Long Short-Term Memory (BiLSTM) model. By continuously analyzing multivariate KPIs&mdash;such as latency, jitter, and retransmission rates&mdash;the model forecasts potential faults and proactively activates FRER. Redundancy is deactivated upon KPI recovery or after a defined minimum protection window, thereby reducing bandwidth usage without compromising reliability. The framework includes a Python-based simulation environment, a real-time visualization dashboard built with Streamlit, and a fully integrated runtime controller. The experimental results demonstrate substantial improvements in link utilization while preserving fault protection, highlighting the effectiveness of anticipatory redundancy strategies in industrial TSN environments.</p>
	]]></content:encoded>

	<dc:title>BiLSTM-Based Fault Anticipation for Predictive Activation of FRER in Time-Sensitive Industrial Networks</dc:title>
			<dc:creator>Mohamed Seliem</dc:creator>
			<dc:creator>Utz Roedig</dc:creator>
			<dc:creator>Cormac Sreenan</dc:creator>
			<dc:creator>Dirk Pesch</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040060</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-10-02</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-10-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>60</prism:startingPage>
		<prism:doi>10.3390/iot6040060</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/60</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/59">

	<title>IoT, Vol. 6, Pages 59: Transmit Power Optimization for Intelligent Reflecting Surface-Assisted Coal Mine Wireless Communication Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/4/59</link>
	<description>The adverse propagation environment in underground coal mine tunnels caused by enclosed spaces, rough surfaces, and dense scatterers severely degrades reliable wireless signal transmission, which further impedes the deployment of IoT applications such as gas monitors and personnel positioning terminals. However, the conventional power enhancement solutions are infeasible for the underground coal mine scenario due to strict explosion-proof safety regulations and battery-powered IoT devices. To address this challenge, we propose singular value decomposition-based Lagrangian optimization (SVD-LOP) to minimize transmit power at the mining base station (MBS) for IRS-assisted coal mine wireless communication systems. In particular, we first establish a three-dimensional twin cluster geometry-based stochastic model (3D-TCGBSM) to accurately characterize the underground coal mine channel. On this basis, we formulate the MBS transmit power minimization problem constrained by user signal-to-noise ratio (SNR) target and IRS phase shifts. To solve this non-convex problem, we propose the SVD-LOP algorithm that performs SVD on the channel matrix to decouple the complex channel coupling and introduces the Lagrange multipliers. Furthermore, we develop a low-complexity successive convex approximation (LC-SCA) algorithm to reduce computational complexity, which constructs a convex approximation of the objective function based on a first-order Taylor expansion and enables suboptimal solutions. Simulation results demonstrate that the proposed SVD-LOP and LC-SCA algorithms achieve transmit power peaks of 20.8dBm and 21.4dBm, respectively, which are slightly lower than the 21.8dBm observed for the SDR algorithm. It is evident that these algorithms remain well below the explosion-proof safety threshold, which achieves significant power reduction. 
However, computational complexity analysis reveals that the proposed SVD-LOP and LC-SCA algorithms achieve O(N^3) and O(N^2) respectively, which offers substantial reductions compared to the SDR algorithm&amp;rsquo;s O(N^7). Moreover, both proposed algorithms exhibit robust convergence across varying user SNR targets while maintaining stable performance gains under different tunnel roughness scenarios.</description>
	<pubDate>2025-09-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 59: Transmit Power Optimization for Intelligent Reflecting Surface-Assisted Coal Mine Wireless Communication Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/59">doi: 10.3390/iot6040059</a></p>
	<p>Authors:
		Yang Liu
		Xiaoyue Li
		Bin Wang
		Yanhong Xu
		</p>
	<p>The adverse propagation environment in underground coal mine tunnels caused by enclosed spaces, rough surfaces, and dense scatterers severely degrades reliable wireless signal transmission, which further impedes the deployment of IoT applications such as gas monitors and personnel positioning terminals. However, the conventional power enhancement solutions are infeasible for the underground coal mine scenario due to strict explosion-proof safety regulations and battery-powered IoT devices. To address this challenge, we propose singular value decomposition-based Lagrangian optimization (SVD-LOP) to minimize transmit power at the mining base station (MBS) for IRS-assisted coal mine wireless communication systems. In particular, we first establish a three-dimensional twin cluster geometry-based stochastic model (3D-TCGBSM) to accurately characterize the underground coal mine channel. On this basis, we formulate the MBS transmit power minimization problem constrained by user signal-to-noise ratio (SNR) target and IRS phase shifts. To solve this non-convex problem, we propose the SVD-LOP algorithm that performs SVD on the channel matrix to decouple the complex channel coupling and introduces the Lagrange multipliers. Furthermore, we develop a low-complexity successive convex approximation (LC-SCA) algorithm to reduce computational complexity, which constructs a convex approximation of the objective function based on a first-order Taylor expansion and enables suboptimal solutions. Simulation results demonstrate that the proposed SVD-LOP and LC-SCA algorithms achieve transmit power peaks of 20.8dBm and 21.4dBm, respectively, which are slightly lower than the 21.8dBm observed for the SDR algorithm. It is evident that these algorithms remain well below the explosion-proof safety threshold, which achieves significant power reduction. 
However, computational complexity analysis reveals that the proposed SVD-LOP and LC-SCA algorithms achieve O(N^3) and O(N^2) respectively, which offers substantial reductions compared to the SDR algorithm&rsquo;s O(N^7). Moreover, both proposed algorithms exhibit robust convergence across varying user SNR targets while maintaining stable performance gains under different tunnel roughness scenarios.</p>
	]]></content:encoded>

	<dc:title>Transmit Power Optimization for Intelligent Reflecting Surface-Assisted Coal Mine Wireless Communication Systems</dc:title>
			<dc:creator>Yang Liu</dc:creator>
			<dc:creator>Xiaoyue Li</dc:creator>
			<dc:creator>Bin Wang</dc:creator>
			<dc:creator>Yanhong Xu</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040059</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-25</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-25</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>59</prism:startingPage>
		<prism:doi>10.3390/iot6040059</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/59</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/58">

	<title>IoT, Vol. 6, Pages 58: Acoustic Trap Design for Biodiversity Detection</title>
	<link>https://www.mdpi.com/2624-831X/6/4/58</link>
	<description>Real-time insect monitoring is essential for sustainable agriculture and biodiversity conservation. The traditional method of attracting insects to colored glue traps and manually counting the catch is time-intensive and requires specialized taxonomic expertise. Moreover, these traps are often lethal to pests and beneficial insects alike, raising both ecological and ethical concerns. Camera-based trap designs have recently emerged to lower the amount of manual labor involved in determining insect species, yet they are still deadly to the catch. This study presents the design and evaluation of a non-lethal acoustic monitoring system capable of detecting and classifying insect species based on their sound signatures. A first prototype was developed with a focus on low self-noise and suitability for autonomous field deployment. The system was initially validated through laboratory experiments, and subsequently tested in six rapeseed fields over a 25-day period. More than 3400 h of acoustic data were successfully collected without system failures. Key findings highlight the importance of carefully selecting each component to minimize self-noise, as insect sounds are extremely low in amplitude. The results also underscore the need for efficient data and energy management strategies in long-term field deployments. This paper aims to share the development process, design decisions, technical challenges, and practical lessons learned over the course of building our IoT sensor system. By outlining what worked, what did not, and what should be improved, this work contributes to the advancement of non-invasive insect monitoring technologies.</description>
	<pubDate>2025-09-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 58: Acoustic Trap Design for Biodiversity Detection</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/58">doi: 10.3390/iot6040058</a></p>
	<p>Authors:
		Chingiz Seyidbayli
		Bárbara Fengler
		Daniel Szafranski
		Andreas Reinhardt
		</p>
	<p>Real-time insect monitoring is essential for sustainable agriculture and biodiversity conservation. The traditional method of attracting insects to colored glue traps and manually counting the catch is time-intensive and requires specialized taxonomic expertise. Moreover, these traps are often lethal to pests and beneficial insects alike, raising both ecological and ethical concerns. Camera-based trap designs have recently emerged to lower the amount of manual labor involved in determining insect species, yet they are still deadly to the catch. This study presents the design and evaluation of a non-lethal acoustic monitoring system capable of detecting and classifying insect species based on their sound signatures. A first prototype was developed with a focus on low self-noise and suitability for autonomous field deployment. The system was initially validated through laboratory experiments, and subsequently tested in six rapeseed fields over a 25-day period. More than 3400 h of acoustic data were successfully collected without system failures. Key findings highlight the importance of carefully selecting each component to minimize self-noise, as insect sounds are extremely low in amplitude. The results also underscore the need for efficient data and energy management strategies in long-term field deployments. This paper aims to share the development process, design decisions, technical challenges, and practical lessons learned over the course of building our IoT sensor system. By outlining what worked, what did not, and what should be improved, this work contributes to the advancement of non-invasive insect monitoring technologies.</p>
	]]></content:encoded>

	<dc:title>Acoustic Trap Design for Biodiversity Detection</dc:title>
			<dc:creator>Chingiz Seyidbayli</dc:creator>
			<dc:creator>Bárbara Fengler</dc:creator>
			<dc:creator>Daniel Szafranski</dc:creator>
			<dc:creator>Andreas Reinhardt</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040058</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-24</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>58</prism:startingPage>
		<prism:doi>10.3390/iot6040058</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/58</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/4/57">

	<title>IoT, Vol. 6, Pages 57: UniTwin: Enabling Multi-Digital Twin Coordination for Modeling Distributed and Complex Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/4/57</link>
	<description>The growing complexity and scale of Cyber&amp;ndash;Physical Systems (CPSs) have led to an increasing need for the holistic orchestration of multiple Digital Twins (DTs). Therefore, an extension to the UniTwin framework is introduced within this paper. UniTwin is a containerized, cloud-native DT framework. This extension enables the hierarchical aggregation of DTs across various abstraction levels. Traditional DT frameworks often lack mechanisms for dynamic composition at the level of entire systems. This is essential for modeling distributed systems in heterogeneous environments. UniTwin addresses this gap by grouping DTs into composite entities with an aggregation mechanism. The aggregation mechanism is demonstrated in a smart manufacturing case study, which covers the orchestration of a production line for personalized shopping cart chips. It uses modular DTs provided for each device within the production line. A System-Aggregated Digital Twin (S-ADT) is used to orchestrate the individual DTs, mapping the devices in the production line. Therefore, the production line adapts and reconfigures according to user-defined parameters. This validates the flexibility and practicality of the aggregation mechanism. This work contributes an aggregation mechanism for the UniTwin framework, paving the way for adaptable DTs for complex CPSs in domains like smart manufacturing, logistics, and infrastructure.</description>
	<pubDate>2025-09-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 57: UniTwin: Enabling Multi-Digital Twin Coordination for Modeling Distributed and Complex Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/4/57">doi: 10.3390/iot6040057</a></p>
	<p>Authors:
		Tim Markus Häußermann
		Joel Lehmann
		Florian Kolb
		Alessa Rache
		Julian Reichwald
		</p>
	<p>The growing complexity and scale of Cyber&ndash;Physical Systems (CPSs) have led to an increasing need for the holistic orchestration of multiple Digital Twins (DTs). Therefore, an extension to the UniTwin framework is introduced within this paper. UniTwin is a containerized, cloud-native DT framework. This extension enables the hierarchical aggregation of DTs across various abstraction levels. Traditional DT frameworks often lack mechanisms for dynamic composition at the level of entire systems. This is essential for modeling distributed systems in heterogeneous environments. UniTwin addresses this gap by grouping DTs into composite entities with an aggregation mechanism. The aggregation mechanism is demonstrated in a smart manufacturing case study, which covers the orchestration of a production line for personalized shopping cart chips. It uses modular DTs provided for each device within the production line. A System-Aggregated Digital Twin (S-ADT) is used to orchestrate the individual DTs, mapping the devices in the production line. Therefore, the production line adapts and reconfigures according to user-defined parameters. This validates the flexibility and practicality of the aggregation mechanism. This work contributes an aggregation mechanism for the UniTwin framework, paving the way for adaptable DTs for complex CPSs in domains like smart manufacturing, logistics, and infrastructure.</p>
	]]></content:encoded>

	<dc:title>UniTwin: Enabling Multi-Digital Twin Coordination for Modeling Distributed and Complex Systems</dc:title>
			<dc:creator>Tim Markus Häußermann</dc:creator>
			<dc:creator>Joel Lehmann</dc:creator>
			<dc:creator>Florian Kolb</dc:creator>
			<dc:creator>Alessa Rache</dc:creator>
			<dc:creator>Julian Reichwald</dc:creator>
		<dc:identifier>doi: 10.3390/iot6040057</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-23</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-23</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>57</prism:startingPage>
		<prism:doi>10.3390/iot6040057</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/4/57</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/56">

	<title>IoT, Vol. 6, Pages 56: AI-Driven Attack Detection and Cryptographic Privacy Protection for Cyber-Resilient Industrial Control Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/3/56</link>
	<description>Industrial control systems (ICS) are increasingly vulnerable to evolving cyber threats due to the convergence of operational and information technologies. This research presents a robust cybersecurity framework that integrates machine learning-based anomaly detection with advanced cryptographic techniques to protect ICS communication networks. Using the ICS-Flow dataset, we evaluate several ensemble models, with XGBoost achieving 99.92% accuracy in binary classification and Decision Tree attaining 99.81% accuracy in multi-class classification. Additionally, we implement an LSTM autoencoder for temporal anomaly detection and employ the ADWIN technique for real-time drift detection. To ensure data security, we apply AES-CBC with HMAC and AES-GCM with RSA encryption, which demonstrates resilience against brute-force, tampering, and cryptanalytic attacks. Security assessments, including entropy analysis and adversarial evaluations (IND-CPA and IND-CCA), confirm the robustness of the encryption schemes against passive and active threats. A hardware implementation on a PYNQ Zynq board shows the feasibility of real-time deployment, with a runtime of 0.11 s. The results demonstrate that the proposed framework enhances ICS security by combining AI-driven anomaly detection with RSA-based cryptography, offering a viable solution for protecting ICS networks from emerging cyber threats.</description>
	<pubDate>2025-09-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 56: AI-Driven Attack Detection and Cryptographic Privacy Protection for Cyber-Resilient Industrial Control Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/56">doi: 10.3390/iot6030056</a></p>
	<p>Authors:
		Archana Pallakonda
		Kabilan Kaliyannan
		Rahul Loganathan Sumathi
		Rayappa David Amar Raj
		Rama Muni Reddy Yanamala
		Christian Napoli
		Cristian Randieri
		</p>
	<p>Industrial control systems (ICS) are increasingly vulnerable to evolving cyber threats due to the convergence of operational and information technologies. This research presents a robust cybersecurity framework that integrates machine learning-based anomaly detection with advanced cryptographic techniques to protect ICS communication networks. Using the ICS-Flow dataset, we evaluate several ensemble models, with XGBoost achieving 99.92% accuracy in binary classification and Decision Tree attaining 99.81% accuracy in multi-class classification. Additionally, we implement an LSTM autoencoder for temporal anomaly detection and employ the ADWIN technique for real-time drift detection. To ensure data security, we apply AES-CBC with HMAC and AES-GCM with RSA encryption, which demonstrates resilience against brute-force, tampering, and cryptanalytic attacks. Security assessments, including entropy analysis and adversarial evaluations (IND-CPA and IND-CCA), confirm the robustness of the encryption schemes against passive and active threats. A hardware implementation on a PYNQ Zynq board shows the feasibility of real-time deployment, with a runtime of 0.11 s. The results demonstrate that the proposed framework enhances ICS security by combining AI-driven anomaly detection with RSA-based cryptography, offering a viable solution for protecting ICS networks from emerging cyber threats.</p>
	]]></content:encoded>

	<dc:title>AI-Driven Attack Detection and Cryptographic Privacy Protection for Cyber-Resilient Industrial Control Systems</dc:title>
			<dc:creator>Archana Pallakonda</dc:creator>
			<dc:creator>Kabilan Kaliyannan</dc:creator>
			<dc:creator>Rahul Loganathan Sumathi</dc:creator>
			<dc:creator>Rayappa David Amar Raj</dc:creator>
			<dc:creator>Rama Muni Reddy Yanamala</dc:creator>
			<dc:creator>Christian Napoli</dc:creator>
			<dc:creator>Cristian Randieri</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030056</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-22</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>56</prism:startingPage>
		<prism:doi>10.3390/iot6030056</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/56</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/55">

	<title>IoT, Vol. 6, Pages 55: Toward Smart School Mobility: IoT-Based Comfort Monitoring Through Sensor Fusion and Standardized Signal Analysis</title>
	<link>https://www.mdpi.com/2624-831X/6/3/55</link>
	<description>As smart cities evolve, integrating new technologies into school transportation is becoming increasingly important to ensure student comfort and safety. Monitoring and enhancing comfort during daily commutes can significantly influence well-being and learning readiness. However, most existing research addresses isolated factors, which limits the development of comprehensive and scalable solutions. This study presents the design and implementation of a low-cost, generalized IoT-based system for monitoring comfort in school transportation. The system processes multiple environmental and operational signals, and these data are transmitted to a cloud computing platform for real-time analysis. Signal processing incorporates standardized metrics, such as root mean square (RMS) values from ISO 2631-1 for vibration assessment. In addition, machine learning techniques, including a Random Forest classifier and ensemble-based models, are applied to classify ride comfort levels using both road roughness and environmental variables. The results show that stacked multisensor fusion achieved a significant improvement in classification performance compared with vibration-only models. The platform also integrates route visualization with commuting time per student, providing valuable information to assess the impact of travel duration on school mobility.</description>
	<pubDate>2025-09-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 55: Toward Smart School Mobility: IoT-Based Comfort Monitoring Through Sensor Fusion and Standardized Signal Analysis</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/55">doi: 10.3390/iot6030055</a></p>
	<p>Authors:
		Lorena León Quiñonez
		Luiz Cesar Martini
		Leonardo de Souza Mendes
		Felipe Marques Pires
		Carlos Carrión Betancourt
		</p>
	<p>As smart cities evolve, integrating new technologies into school transportation is becoming increasingly important to ensure student comfort and safety. Monitoring and enhancing comfort during daily commutes can significantly influence well-being and learning readiness. However, most existing research addresses isolated factors, which limits the development of comprehensive and scalable solutions. This study presents the design and implementation of a low-cost, generalized IoT-based system for monitoring comfort in school transportation. The system processes multiple environmental and operational signals, and these data are transmitted to a cloud computing platform for real-time analysis. Signal processing incorporates standardized metrics, such as root mean square (RMS) values from ISO 2631-1 for vibration assessment. In addition, machine learning techniques, including a Random Forest classifier and ensemble-based models, are applied to classify ride comfort levels using both road roughness and environmental variables. The results show that stacked multisensor fusion achieved a significant improvement in classification performance compared with vibration-only models. The platform also integrates route visualization with commuting time per student, providing valuable information to assess the impact of travel duration on school mobility.</p>
	]]></content:encoded>

	<dc:title>Toward Smart School Mobility: IoT-Based Comfort Monitoring Through Sensor Fusion and Standardized Signal Analysis</dc:title>
			<dc:creator>Lorena León Quiñonez</dc:creator>
			<dc:creator>Luiz Cesar Martini</dc:creator>
			<dc:creator>Leonardo de Souza Mendes</dc:creator>
			<dc:creator>Felipe Marques Pires</dc:creator>
			<dc:creator>Carlos Carrión Betancourt</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030055</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-16</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-16</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/iot6030055</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/55</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/54">

	<title>IoT, Vol. 6, Pages 54: Extending WSN Lifetime via Optimized Mobile Sink Trajectories: Linear Programming and Cuckoo Search Approaches with Overhearing-Aware Energy Models</title>
	<link>https://www.mdpi.com/2624-831X/6/3/54</link>
	<description>Maximizing the lifetimes of Wireless Sensor Networks (WSNs) is a prominent area of research. The energy hole problem is a major cause of network shutdown, where nodes within the Sink coverage deplete their energy faster due to the high energy cost of forwarding data from distant nodes to the Sink. Several research works have proposed solutions to address this issue, including the use of a mobile Sink to balance energy consumption throughout the network. However, most Sink mobility models overlook the energy consumption caused by overhearing, which is a critical factor in WSNs. In this paper, we introduce Linear Programming (LP) and Cuckoo Search (CS) metaheuristic optimization-based solutions to maximize the lifetime of WSNs by determining the optimal Sink sojourn points and associated durations. The proposed approaches consider the energy consumption levels of both reception and transmission, in addition to accounting for overhearing as an additional source of energy consumption. This allows for a comparison between the LP and CS solutions in terms of their effectiveness. To further enhance our solution, we apply the Traveling Salesman Problem (TSP) to find the shortest path between the Sink sojourn points. By incorporating the TSP, we can optimize the routing path for the mobile Sink, thereby minimizing energy consumption and maximizing network lifetime. Test results demonstrate that the LP solution provides more accurate Sink sojourn times and locations, while the CS solution is faster, particularly for large WSNs. Moreover, our findings indicate that overlooking overhearing leads to a 48% decrease in WSN lifetime, making it essential to consider this factor if one is to achieve realistic results.</description>
	<pubDate>2025-09-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 54: Extending WSN Lifetime via Optimized Mobile Sink Trajectories: Linear Programming and Cuckoo Search Approaches with Overhearing-Aware Energy Models</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/54">doi: 10.3390/iot6030054</a></p>
	<p>Authors:
		Ghada Turki Al-Mamari
		Fatma Bouabdallah
		Asma Cherif
		</p>
	<p>Maximizing the lifetimes of Wireless Sensor Networks (WSNs) is a prominent area of research. The energy hole problem is a major cause of network shutdown, where nodes within the Sink coverage deplete their energy faster due to the high energy cost of forwarding data from distant nodes to the Sink. Several research works have proposed solutions to address this issue, including the use of a mobile Sink to balance energy consumption throughout the network. However, most Sink mobility models overlook the energy consumption caused by overhearing, which is a critical factor in WSNs. In this paper, we introduce Linear Programming (LP) and Cuckoo Search (CS) metaheuristic optimization-based solutions to maximize the lifetime of WSNs by determining the optimal Sink sojourn points and associated durations. The proposed approaches consider the energy consumption levels of both reception and transmission, in addition to accounting for overhearing as an additional source of energy consumption. This allows for a comparison between the LP and CS solutions in terms of their effectiveness. To further enhance our solution, we apply the Traveling Salesman Problem (TSP) to find the shortest path between the Sink sojourn points. By incorporating the TSP, we can optimize the routing path for the mobile Sink, thereby minimizing energy consumption and maximizing network lifetime. Test results demonstrate that the LP solution provides more accurate Sink sojourn times and locations, while the CS solution is faster, particularly for large WSNs. Moreover, our findings indicate that overlooking overhearing leads to a 48% decrease in WSN lifetime, making it essential to consider this factor if one is to achieve realistic results.</p>
	]]></content:encoded>

	<dc:title>Extending WSN Lifetime via Optimized Mobile Sink Trajectories: Linear Programming and Cuckoo Search Approaches with Overhearing-Aware Energy Models</dc:title>
			<dc:creator>Ghada Turki Al-Mamari</dc:creator>
			<dc:creator>Fatma Bouabdallah</dc:creator>
			<dc:creator>Asma Cherif</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030054</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-14</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-14</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/iot6030054</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/53">

	<title>IoT, Vol. 6, Pages 53: Trustworthy Adaptive AI for Real-Time Intrusion Detection in Industrial IoT Security</title>
	<link>https://www.mdpi.com/2624-831X/6/3/53</link>
	<description>Traditional security methods fail to match the speed of evolving threats because Industrial Internet of Things (IIoT) technologies have become more widely adopted. A lightweight adaptive AI-based intrusion detection system (IDS) for IIoT environments is presented in this paper. The proposed system detects cyber threats in real time through an ensemble of online learning models that also adapt to changing network behavior. The system implements SHAP (SHapley Additive exPlanations) for model prediction explanations to allow human operators to verify and understand alert causes while addressing the essential need for trust and transparency. The system validation was performed using the ToN_IoT and Bot-IoT benchmark datasets. The proposed system detects threats with 96.4% accuracy while producing 2.1% false positives and requiring 35 ms on average for detection on edge devices with limited resources. Security analysts can understand model decisions through SHAP analysis because packet size and protocol type and device activity patterns strongly affect model predictions. The system underwent testing on a Raspberry Pi 5-based IIoT testbed to evaluate its deployability in real-world scenarios through emulation of practical edge environments with constrained computational resources. The research unites real-time adaptability with explainability and low-latency performance in an IDS framework specifically designed for industrial IoT security. The solution provides a scalable method to boost cyber resilience in manufacturing, together with energy and critical infrastructure sectors. By enabling fast, interpretable, and low-latency intrusion detection directly on edge devices, this solution enhances cyber resilience in critical sectors such as manufacturing, energy, and infrastructure, where timely and trustworthy threat responses are essential to maintaining operational continuity and safety.</description>
	<pubDate>2025-09-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 53: Trustworthy Adaptive AI for Real-Time Intrusion Detection in Industrial IoT Security</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/53">doi: 10.3390/iot6030053</a></p>
	<p>Authors:
		Mohammad Al Rawajbeh
		Amala Jayanthi Maria Soosai
		Lakshmana Kumar Ramasamy
		Firoz Khan
		</p>
	<p>Traditional security methods fail to match the speed of evolving threats because Industrial Internet of Things (IIoT) technologies have become more widely adopted. A lightweight adaptive AI-based intrusion detection system (IDS) for IIoT environments is presented in this paper. The proposed system detects cyber threats in real time through an ensemble of online learning models that also adapt to changing network behavior. The system implements SHAP (SHapley Additive exPlanations) for model prediction explanations to allow human operators to verify and understand alert causes while addressing the essential need for trust and transparency. The system validation was performed using the ToN_IoT and Bot-IoT benchmark datasets. The proposed system detects threats with 96.4% accuracy while producing 2.1% false positives and requiring 35 ms on average for detection on edge devices with limited resources. Security analysts can understand model decisions through SHAP analysis because packet size and protocol type and device activity patterns strongly affect model predictions. The system underwent testing on a Raspberry Pi 5-based IIoT testbed to evaluate its deployability in real-world scenarios through emulation of practical edge environments with constrained computational resources. The research unites real-time adaptability with explainability and low-latency performance in an IDS framework specifically designed for industrial IoT security. The solution provides a scalable method to boost cyber resilience in manufacturing, together with energy and critical infrastructure sectors. By enabling fast, interpretable, and low-latency intrusion detection directly on edge devices, this solution enhances cyber resilience in critical sectors such as manufacturing, energy, and infrastructure, where timely and trustworthy threat responses are essential to maintaining operational continuity and safety.</p>
	]]></content:encoded>

	<dc:title>Trustworthy Adaptive AI for Real-Time Intrusion Detection in Industrial IoT Security</dc:title>
			<dc:creator>Mohammad Al Rawajbeh</dc:creator>
			<dc:creator>Amala Jayanthi Maria Soosai</dc:creator>
			<dc:creator>Lakshmana Kumar Ramasamy</dc:creator>
			<dc:creator>Firoz Khan</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030053</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-08</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/iot6030053</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/52">

	<title>IoT, Vol. 6, Pages 52: An Extension of Input Setup Assistance Service Using Generative AI to Unlearned Sensors for the SEMAR IoT Application Server Platform</title>
	<link>https://www.mdpi.com/2624-831X/6/3/52</link>
	<description>Nowadays, Internet of Things (IoT) application systems are broadly applied to various sectors of society for efficient management by monitoring environments using sensors, analyzing sampled data, and giving proper feedback. For their fast deployment, we have developed Smart Environmental Monitoring and Analysis in Real Time (SEMAR) as an integrated IoT application server platform and implemented the input setup assistance service using prompt engineering and a generative AI model to assist connecting sensors to SEMAR with step-by-step guidance. However, the current service cannot assist in connections of the sensors not learned by the AI model, such as newly released ones. To address this issue, in this paper, we propose an extension to the service for handling unlearned sensors by utilizing datasheets with four steps: (1) users input a PDF datasheet containing information about the sensor, (2) key specifications are extracted from the datasheet and structured into markdown format using a generative AI, (3) this data is saved to a vector database using chunking and embedding methods, and (4) the data is used in Retrieval-Augmented Generation (RAG) to provide additional context when guiding users through sensor setup. Our evaluation with five generative AI models shows that OpenAI&#8217;s GPT-4o achieves the highest accuracy in extracting specifications from PDF datasheets and the best answer relevancy (0.987), while Gemini 2.0 Flash delivers the most balanced results, with the highest overall RAGAs score (0.76). Other models produced competitive but mixed outcomes, averaging 0.74 across metrics. The step-by-step guidance function achieved a task success rate above 80%. In a course evaluation by 48 students, the system improved the student test scores, further confirming the effectiveness of our proposed extension.</description>
	<pubDate>2025-09-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 52: An Extension of Input Setup Assistance Service Using Generative AI to Unlearned Sensors for the SEMAR IoT Application Server Platform</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/52">doi: 10.3390/iot6030052</a></p>
	<p>Authors:
		I Nyoman Darma Kotama
		Nobuo Funabiki
		Yohanes Yohanie Fridelin Panduman
		Komang Candra Brata
		Anak Agung Surya Pradhana
		Noprianto
		</p>
	<p>Nowadays, Internet of Things (IoT) application systems are broadly applied to various sectors of society for efficient management by monitoring environments using sensors, analyzing sampled data, and giving proper feedback. For their fast deployment, we have developed Smart Environmental Monitoring and Analysis in Real Time (SEMAR) as an integrated IoT application server platform and implemented the input setup assistance service using prompt engineering and a generative AI model to assist connecting sensors to SEMAR with step-by-step guidance. However, the current service cannot assist in connections of the sensors not learned by the AI model, such as newly released ones. To address this issue, in this paper, we propose an extension to the service for handling unlearned sensors by utilizing datasheets with four steps: (1) users input a PDF datasheet containing information about the sensor, (2) key specifications are extracted from the datasheet and structured into markdown format using a generative AI, (3) this data is saved to a vector database using chunking and embedding methods, and (4) the data is used in Retrieval-Augmented Generation (RAG) to provide additional context when guiding users through sensor setup. Our evaluation with five generative AI models shows that OpenAI&rsquo;s GPT-4o achieves the highest accuracy in extracting specifications from PDF datasheets and the best answer relevancy (0.987), while Gemini 2.0 Flash delivers the most balanced results, with the highest overall RAGAs score (0.76). Other models produced competitive but mixed outcomes, averaging 0.74 across metrics. The step-by-step guidance function achieved a task success rate above 80%. In a course evaluation by 48 students, the system improved the student test scores, further confirming the effectiveness of our proposed extension.</p>
	]]></content:encoded>

	<dc:title>An Extension of Input Setup Assistance Service Using Generative AI to Unlearned Sensors for the SEMAR IoT Application Server Platform</dc:title>
			<dc:creator>I Nyoman Darma Kotama</dc:creator>
			<dc:creator>Nobuo Funabiki</dc:creator>
			<dc:creator>Yohanes Yohanie Fridelin Panduman</dc:creator>
			<dc:creator>Komang Candra Brata</dc:creator>
			<dc:creator>Anak Agung Surya Pradhana</dc:creator>
			<dc:creator>Noprianto</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030052</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-08</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/iot6030052</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/51">

	<title>IoT, Vol. 6, Pages 51: Internet of Things and Blockchain Adoption in Food Supply Chain: A Survey</title>
	<link>https://www.mdpi.com/2624-831X/6/3/51</link>
	<description>The characteristics of Food Supply Chains (FSC) make them hard to manage properly, and many efforts have been conducted to alleviate the difficulties related to their management, especially when it comes to integrating the latest Information and Communications Technologies. The Internet of Things (IoT) has shown to be very beneficial in providing a holistic and real-time vision of FSCs. Blockchain, with its decentralization and immutability, is another promising technology, that is showing a great potential in managing FSCs. A lot of research has been carried out to prove the advantages of each of these technologies on its own. However, the research investigating their adoption together is still not enough. Our paper presents a study of recent advances in the integration of IoT and Blockchain in Food Supply Chain Management (FSCM) over the past five years. We identify key research trends, analyze the benefits and limitations of IoT&#8211;blockchain integration, and highlight major challenges hindering large-scale adoption. Finally, we propose future research directions to address these challenges and improve the adoption of IoT&#8211;blockchain solutions in FSCs. This study aims to serve as a reference for researchers and practitioners seeking to understand and advance the integration of these emerging technologies in FSCM.</description>
	<pubDate>2025-09-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 51: Internet of Things and Blockchain Adoption in Food Supply Chain: A Survey</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/51">doi: 10.3390/iot6030051</a></p>
	<p>Authors:
		Yehya Bouchbout
		Ala-Eddine Benrazek
		Bálint Molnár
		Brahim Farou
		Khawla Bouafia
		Hamid Seridi
		</p>
	<p>The characteristics of Food Supply Chains (FSC) make them hard to manage properly, and many efforts have been conducted to alleviate the difficulties related to their management, especially when it comes to integrating the latest Information and Communications Technologies. The Internet of Things (IoT) has shown to be very beneficial in providing a holistic and real-time vision of FSCs. Blockchain, with its decentralization and immutability, is another promising technology, that is showing a great potential in managing FSCs. A lot of research has been carried out to prove the advantages of each of these technologies on its own. However, the research investigating their adoption together is still not enough. Our paper presents a study of recent advances in the integration of IoT and Blockchain in Food Supply Chain Management (FSCM) over the past five years. We identify key research trends, analyze the benefits and limitations of IoT&ndash;blockchain integration, and highlight major challenges hindering large-scale adoption. Finally, we propose future research directions to address these challenges and improve the adoption of IoT&ndash;blockchain solutions in FSCs. This study aims to serve as a reference for researchers and practitioners seeking to understand and advance the integration of these emerging technologies in FSCM.</p>
	]]></content:encoded>

	<dc:title>Internet of Things and Blockchain Adoption in Food Supply Chain: A Survey</dc:title>
			<dc:creator>Yehya Bouchbout</dc:creator>
			<dc:creator>Ala-Eddine Benrazek</dc:creator>
			<dc:creator>Bálint Molnár</dc:creator>
			<dc:creator>Brahim Farou</dc:creator>
			<dc:creator>Khawla Bouafia</dc:creator>
			<dc:creator>Hamid Seridi</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030051</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-02</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/iot6030051</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/50">

	<title>IoT, Vol. 6, Pages 50: LightLiveAuth: A Lightweight Continuous Authentication Model for Virtual Reality</title>
	<link>https://www.mdpi.com/2624-831X/6/3/50</link>
	<description>As network infrastructure and Internet of Things (IoT) technologies continue to evolve, immersive systems such as virtual reality (VR) are becoming increasingly integrated into interconnected environments. These advancements allow real-time processing of multi-modal data, improving user experiences with rich visual and three-dimensional interactions. However, ensuring continuous user authentication in VR environments remains a significant challenge. To address this issue, an effective user monitoring system is required to track VR users in real time and trigger re-authentication when necessary. Based on this premise, we propose a multi-modal authentication framework that uses eye-tracking data for authentication, named MobileNetV3pro. The framework applies a transfer learning approach by adapting the MobileNetV3Large architecture (pretrained on ImageNet) as a feature extractor. Its pre-trained convolutional layers are used to obtain high-level image representations, while a custom fully connected classification is added to perform binary classification. Authentication performance is evaluated using Equal Error Rate (EER), accuracy, F1-score, model size, and inference time. Experimental results show that eye-based authentication with MobileNetV3pro achieves a lower EER (3.00%) than baseline models, demonstrating its effectiveness in VR environments.</description>
	<pubDate>2025-09-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 50: LightLiveAuth: A Lightweight Continuous Authentication Model for Virtual Reality</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/50">doi: 10.3390/iot6030050</a></p>
	<p>Authors:
		Pengyu Li
		Feifei Chen
		Lei Pan
		Thuong Hoang
		Ye Zhu
		Leon Yang
		</p>
	<p>As network infrastructure and Internet of Things (IoT) technologies continue to evolve, immersive systems such as virtual reality (VR) are becoming increasingly integrated into interconnected environments. These advancements allow real-time processing of multi-modal data, improving user experiences with rich visual and three-dimensional interactions. However, ensuring continuous user authentication in VR environments remains a significant challenge. To address this issue, an effective user monitoring system is required to track VR users in real time and trigger re-authentication when necessary. Based on this premise, we propose a multi-modal authentication framework that uses eye-tracking data for authentication, named MobileNetV3pro. The framework applies a transfer learning approach by adapting the MobileNetV3Large architecture (pretrained on ImageNet) as a feature extractor. Its pre-trained convolutional layers are used to obtain high-level image representations, while a custom fully connected classification is added to perform binary classification. Authentication performance is evaluated using Equal Error Rate (EER), accuracy, F1-score, model size, and inference time. Experimental results show that eye-based authentication with MobileNetV3pro achieves a lower EER (3.00%) than baseline models, demonstrating its effectiveness in VR environments.</p>
	]]></content:encoded>

	<dc:title>LightLiveAuth: A Lightweight Continuous Authentication Model for Virtual Reality</dc:title>
			<dc:creator>Pengyu Li</dc:creator>
			<dc:creator>Feifei Chen</dc:creator>
			<dc:creator>Lei Pan</dc:creator>
			<dc:creator>Thuong Hoang</dc:creator>
			<dc:creator>Ye Zhu</dc:creator>
			<dc:creator>Leon Yang</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030050</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-09-02</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-09-02</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/iot6030050</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/49">

	<title>IoT, Vol. 6, Pages 49: Energy-Efficient Strategies in Wireless Body Area Networks: A Comprehensive Survey</title>
	<link>https://www.mdpi.com/2624-831X/6/3/49</link>
	<description>Wireless body area networks (WBANs) are a pivotal solution for continuous health monitoring, but their energy constraints pose a significant challenge for long-term operation. This paper provides a comprehensive review of state-of-the-art energy-efficient mechanisms, critically evaluating solutions across various network layers. We focus on three key approaches: energy-aware MAC protocols that reduce idle listening and optimize duty cycling; energy-efficient routing protocols that enhance data transmission and network longevity; and emerging energy harvesting techniques that offer a path toward energy-autonomous WBANs. Furthermore, the paper provides a detailed analysis of the inherent trade-offs between energy efficiency and other critical performance metrics, such as latency, reliability, and security. It also explores the transformative potential of emerging technologies, such as AI and blockchain, for dynamic energy management and secure data handling. By synthesizing these findings, this work contributes to the development of sustainable WBAN solutions and outlines clear directions for future research.</description>
	<pubDate>2025-08-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 49: Energy-Efficient Strategies in Wireless Body Area Networks: A Comprehensive Survey</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/49">doi: 10.3390/iot6030049</a></p>
	<p>Authors:
		Marwa Boumaiz
		Mohammed El Ghazi
		Anas Bouayad
		Younes Balboul
		Moulhime El Bekkali
		</p>
	<p>Wireless body area networks (WBANs) are a pivotal solution for continuous health monitoring, but their energy constraints pose a significant challenge for long-term operation. This paper provides a comprehensive review of state-of-the-art energy-efficient mechanisms, critically evaluating solutions across various network layers. We focus on three key approaches: energy-aware MAC protocols that reduce idle listening and optimize duty cycling; energy-efficient routing protocols that enhance data transmission and network longevity; and emerging energy harvesting techniques that offer a path toward energy-autonomous WBANs. Furthermore, the paper provides a detailed analysis of the inherent trade-offs between energy efficiency and other critical performance metrics, such as latency, reliability, and security. It also explores the transformative potential of emerging technologies, such as AI and blockchain, for dynamic energy management and secure data handling. By synthesizing these findings, this work contributes to the development of sustainable WBAN solutions and outlines clear directions for future research.</p>
	]]></content:encoded>

	<dc:title>Energy-Efficient Strategies in Wireless Body Area Networks: A Comprehensive Survey</dc:title>
			<dc:creator>Marwa Boumaiz</dc:creator>
			<dc:creator>Mohammed El Ghazi</dc:creator>
			<dc:creator>Anas Bouayad</dc:creator>
			<dc:creator>Younes Balboul</dc:creator>
			<dc:creator>Moulhime El Bekkali</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030049</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-29</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/iot6030049</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/48">

	<title>IoT, Vol. 6, Pages 48: A Two-Stage Hybrid Federated Learning Framework for Privacy-Preserving IoT Anomaly Detection and Classification</title>
	<link>https://www.mdpi.com/2624-831X/6/3/48</link>
	<description>The rapid surge of Artificial Internet-of-Things (AIoT) devices has outpaced the deployment of robust, privacy-preserving anomaly detection solutions suitable for resource-constrained edge environments. This paper presents a two-stage hybrid Federated Learning (FL) framework for IoT anomaly detection and classification, validated on the real-world N-BaIoT dataset. In the first stage, each device trains a generative Artificial Intelligence (AI) model on benign traffic only, and in the second stage a Histogram-based Gradient-Boosting (HGB) classifier labels flagged traffic. All models operate under a synchronous, collaborative FL architecture across nine commercial IoT devices, thus preserving data privacy and minimizing communication. Through both inter- and intra-benchmarking against state-of-the-art baselines, the Variational Autoencoder&#8211;HGB (VAE-HGB) pipeline emerges as the top performer, achieving an average end-to-end accuracy of 99.14% across all classes. These results demonstrate that reconstruction-driven generative AI models, when combined with federated averaging and efficient classification, deliver a highly scalable, accurate, and privacy-preserving solution for securing resource-constrained IoT environments.</description>
	<pubDate>2025-08-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 48: A Two-Stage Hybrid Federated Learning Framework for Privacy-Preserving IoT Anomaly Detection and Classification</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/48">doi: 10.3390/iot6030048</a></p>
	<p>Authors:
		Mohammad Shahin
		Ali Hosseinzadeh
		F. Frank Chen
		</p>
	<p>The rapid surge of Artificial Internet-of-Things (AIoT) devices has outpaced the deployment of robust, privacy-preserving anomaly detection solutions suitable for resource-constrained edge environments. This paper presents a two-stage hybrid Federated Learning (FL) framework for IoT anomaly detection and classification, validated on the real-world N-BaIoT dataset. In the first stage, each device trains a generative Artificial Intelligence (AI) model on benign traffic only, and in the second stage a Histogram-based Gradient-Boosting (HGB) classifier labels flagged traffic. All models operate under a synchronous, collaborative FL architecture across nine commercial IoT devices, thus preserving data privacy and minimizing communication. Through both inter- and intra-benchmarking against state-of-the-art baselines, the Variational Autoencoder&ndash;HGB (VAE-HGB) pipeline emerges as the top performer, achieving an average end-to-end accuracy of 99.14% across all classes. These results demonstrate that reconstruction-driven generative AI models, when combined with federated averaging and efficient classification, deliver a highly scalable, accurate, and privacy-preserving solution for securing resource-constrained IoT environments.</p>
	]]></content:encoded>

	<dc:title>A Two-Stage Hybrid Federated Learning Framework for Privacy-Preserving IoT Anomaly Detection and Classification</dc:title>
			<dc:creator>Mohammad Shahin</dc:creator>
			<dc:creator>Ali Hosseinzadeh</dc:creator>
			<dc:creator>F. Frank Chen</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030048</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-29</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-29</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/iot6030048</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/47">

	<title>IoT, Vol. 6, Pages 47: Radio Frequency Fingerprinting Authentication for IoT Networks Using Siamese Networks</title>
	<link>https://www.mdpi.com/2624-831X/6/3/47</link>
	<description>As IoT (internet of things) devices grow in prominence, safeguarding them from cyberattacks is becoming a pressing challenge. To bootstrap IoT security, device identification or authentication is crucial for establishing trusted connections among devices without prior trust. In this regard, radio frequency fingerprinting (RFF) is gaining attention because it is more efficient and requires fewer computational resources compared to resource-intensive cryptographic methods, such as digital signatures. RFF works by identifying unique manufacturing defects in the radio circuitry of IoT devices by analyzing over-the-air signals that embed these imperfections, allowing for the identification of the transmitting hardware. Recent studies on RFF often leverage advanced classification models, including classical machine learning techniques such as K-Nearest Neighbor (KNN) and Support Vector Machine (SVM), as well as modern deep learning architectures like Convolutional Neural Network (CNN). In particular, CNNs are well-suited as they use multidimensional mapping to detect and extract reliable fingerprints during the learning process. However, a significant limitation of these approaches is that they require large datasets and necessitate retraining when new devices not included in the initial training set are added. This retraining can cause service interruptions and is costly, especially in large-scale IoT networks. In this paper, we propose a novel solution to this problem: RFF using Siamese networks, which eliminates the need for retraining and allows for seamless authentication in IoT deployments. The proposed Siamese network is trained using in-phase and quadrature (I/Q) samples from 10 different Software-Defined Radios (SDRs). Additionally, we present a new algorithm, the Similarity-Based Embedding Classification (SBEC) for RFF. 
We present experimental results that demonstrate that the Siamese network effectively distinguishes between malicious and trusted devices with a remarkable 98% identification accuracy.</description>
	<pubDate>2025-08-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 47: Radio Frequency Fingerprinting Authentication for IoT Networks Using Siamese Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/47">doi: 10.3390/iot6030047</a></p>
	<p>Authors:
		Raju Dhakal
		Laxima Niure Kandel
		Prashant Shekhar
		</p>
	<p>As IoT (internet of things) devices grow in prominence, safeguarding them from cyberattacks is becoming a pressing challenge. To bootstrap IoT security, device identification or authentication is crucial for establishing trusted connections among devices without prior trust. In this regard, radio frequency fingerprinting (RFF) is gaining attention because it is more efficient and requires fewer computational resources compared to resource-intensive cryptographic methods, such as digital signatures. RFF works by identifying unique manufacturing defects in the radio circuitry of IoT devices by analyzing over-the-air signals that embed these imperfections, allowing for the identification of the transmitting hardware. Recent studies on RFF often leverage advanced classification models, including classical machine learning techniques such as K-Nearest Neighbor (KNN) and Support Vector Machine (SVM), as well as modern deep learning architectures like Convolutional Neural Network (CNN). In particular, CNNs are well-suited as they use multidimensional mapping to detect and extract reliable fingerprints during the learning process. However, a significant limitation of these approaches is that they require large datasets and necessitate retraining when new devices not included in the initial training set are added. This retraining can cause service interruptions and is costly, especially in large-scale IoT networks. In this paper, we propose a novel solution to this problem: RFF using Siamese networks, which eliminates the need for retraining and allows for seamless authentication in IoT deployments. The proposed Siamese network is trained using in-phase and quadrature (I/Q) samples from 10 different Software-Defined Radios (SDRs). Additionally, we present a new algorithm, the Similarity-Based Embedding Classification (SBEC) for RFF. 
We present experimental results that demonstrate that the Siamese network effectively distinguishes between malicious and trusted devices with a remarkable 98% identification accuracy.</p>
	]]></content:encoded>

	<dc:title>Radio Frequency Fingerprinting Authentication for IoT Networks Using Siamese Networks</dc:title>
			<dc:creator>Raju Dhakal</dc:creator>
			<dc:creator>Laxima Niure Kandel</dc:creator>
			<dc:creator>Prashant Shekhar</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030047</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-22</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/iot6030047</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/46">

	<title>IoT, Vol. 6, Pages 46: IoT and Machine Learning for Smart Bird Monitoring and Repellence: Techniques, Challenges, and Opportunities</title>
	<link>https://www.mdpi.com/2624-831X/6/3/46</link>
	<description>The activities of birds present increasing challenges in agriculture, aviation, and environmental conservation. This has led to economic losses, safety risks, and ecological imbalances. Attempts have been made to address the problem, with traditional deterrent methods proving to be labour-intensive, environmentally unfriendly, and ineffective over time. Advances in artificial intelligence (AI) and the Internet of Things (IoT) present opportunities for enabling automated real-time bird detection and repellence. This study reviews recent developments (2020&#8211;2025) in AI-driven bird detection and repellence systems, emphasising the integration of image, audio, and multi-sensor data in IoT and edge-based environments. The Preferred Reporting Items for Systematic Reviews and Meta-Analyses framework was used, with 267 studies initially identified and screened from key scientific databases. A total of 154 studies met the inclusion criteria and were analysed. The findings show the increasing use of convolutional neural networks (CNNs), YOLO variants, and MobileNet in visual detection, and the growing use of lightweight audio-based models such as BirdNET, MFCC-based CNNs, and TinyML frameworks for microcontroller deployment. Multi-sensor fusion is proposed to improve detection accuracy in diverse environments. Repellence strategies include sound-based deterrents, visual deterrents, predator-mimicking visuals, and adaptive AI-integrated systems. Deployment success depends on edge compatibility, power efficiency, and dataset quality. The limitations of current studies include species-specific detection challenges, data scarcity, environmental changes, and energy constraints. Future research should focus on tiny and lightweight AI models, standardised multi-modal datasets, and intelligent, behaviour-aware deterrence mechanisms suitable for precision agriculture and ecological monitoring.</description>
	<pubDate>2025-08-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 46: IoT and Machine Learning for Smart Bird Monitoring and Repellence: Techniques, Challenges, and Opportunities</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/46">doi: 10.3390/iot6030046</a></p>
	<p>Authors:
		Samson O. Ooko
		Emmanuel Ndashimye
		Evariste Twahirwa
		Moise Busogi
		</p>
	<p>The activities of birds present increasing challenges in agriculture, aviation, and environmental conservation. This has led to economic losses, safety risks, and ecological imbalances. Attempts have been made to address the problem, with traditional deterrent methods proving to be labour-intensive, environmentally unfriendly, and ineffective over time. Advances in artificial intelligence (AI) and the Internet of Things (IoT) present opportunities for enabling automated real-time bird detection and repellence. This study reviews recent developments (2020&ndash;2025) in AI-driven bird detection and repellence systems, emphasising the integration of image, audio, and multi-sensor data in IoT and edge-based environments. The Preferred Reporting Items for Systematic Reviews and Meta-Analyses framework was used, with 267 studies initially identified and screened from key scientific databases. A total of 154 studies met the inclusion criteria and were analysed. The findings show the increasing use of convolutional neural networks (CNNs), YOLO variants, and MobileNet in visual detection, and the growing use of lightweight audio-based models such as BirdNET, MFCC-based CNNs, and TinyML frameworks for microcontroller deployment. Multi-sensor fusion is proposed to improve detection accuracy in diverse environments. Repellence strategies include sound-based deterrents, visual deterrents, predator-mimicking visuals, and adaptive AI-integrated systems. Deployment success depends on edge compatibility, power efficiency, and dataset quality. The limitations of current studies include species-specific detection challenges, data scarcity, environmental changes, and energy constraints. Future research should focus on tiny and lightweight AI models, standardised multi-modal datasets, and intelligent, behaviour-aware deterrence mechanisms suitable for precision agriculture and ecological monitoring.</p>
	]]></content:encoded>

	<dc:title>IoT and Machine Learning for Smart Bird Monitoring and Repellence: Techniques, Challenges, and Opportunities</dc:title>
			<dc:creator>Samson O. Ooko</dc:creator>
			<dc:creator>Emmanuel Ndashimye</dc:creator>
			<dc:creator>Evariste Twahirwa</dc:creator>
			<dc:creator>Moise Busogi</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030046</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-07</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-07</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/iot6030046</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/45">

	<title>IoT, Vol. 6, Pages 45: IoT Devices and Their Impact on Learning: A Systematic Review of Technological and Educational Affordances</title>
	<link>https://www.mdpi.com/2624-831X/6/3/45</link>
	<description>A principal factor of the fourth Industrial Revolution is the Internet of Things (IoT), a network of &#8220;smart&#8221; objects that communicate by exchanging helpful information about themselves and their environment. Our research aims to address the gaps in the existing literature regarding the educational and technological affordances of IoT applications in learning environments in secondary education. Our systematic review using the PRISMA method allowed us to extract 25 empirical studies from the last 10 years. We present the categorization of educational and technological affordances, as well as the devices used in these environments. Moreover, our findings indicate widespread adoption of organized educational activities and design-based learning, often incorporating tangible interfaces, smart objects, and IoT applications, which enhance student engagement and interaction. Additionally, we identify the impact of IoT-based learning on knowledge building, autonomous learning, student attitude, and motivation. The results suggest that the IoT can facilitate personalized and experiential learning, fostering a more immersive and adaptive educational experience. Based on these findings, we discuss key recommendations for educators, policymakers, and researchers, while also addressing this study&#8217;s limitations and potential directions for future research.</description>
	<pubDate>2025-08-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 45: IoT Devices and Their Impact on Learning: A Systematic Review of Technological and Educational Affordances</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/45">doi: 10.3390/iot6030045</a></p>
	<p>Authors:
		Dimitris Tsipianitis
		Anastasia Misirli
		Konstantinos Lavidas
		Vassilis Komis
		</p>
	<p>A principal factor of the fourth Industrial Revolution is the Internet of Things (IoT), a network of &ldquo;smart&rdquo; objects that communicate by exchanging helpful information about themselves and their environment. Our research aims to address the gaps in the existing literature regarding the educational and technological affordances of IoT applications in learning environments in secondary education. Our systematic review using the PRISMA method allowed us to extract 25 empirical studies from the last 10 years. We present the categorization of educational and technological affordances, as well as the devices used in these environments. Moreover, our findings indicate widespread adoption of organized educational activities and design-based learning, often incorporating tangible interfaces, smart objects, and IoT applications, which enhance student engagement and interaction. Additionally, we identify the impact of IoT-based learning on knowledge building, autonomous learning, student attitude, and motivation. The results suggest that the IoT can facilitate personalized and experiential learning, fostering a more immersive and adaptive educational experience. Based on these findings, we discuss key recommendations for educators, policymakers, and researchers, while also addressing this study&rsquo;s limitations and potential directions for future research.</p>
	]]></content:encoded>

	<dc:title>IoT Devices and Their Impact on Learning: A Systematic Review of Technological and Educational Affordances</dc:title>
			<dc:creator>Dimitris Tsipianitis</dc:creator>
			<dc:creator>Anastasia Misirli</dc:creator>
			<dc:creator>Konstantinos Lavidas</dc:creator>
			<dc:creator>Vassilis Komis</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030045</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-07</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-07</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/iot6030045</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/44">

	<title>IoT, Vol. 6, Pages 44: Optimizing Urban Mobility Through Complex Network Analysis and Big Data from Smart Cards</title>
	<link>https://www.mdpi.com/2624-831X/6/3/44</link>
	<description>Urban public transportation systems face increasing pressure from shifting travel patterns, rising peak-hour demand, and the need for equitable and resilient service delivery. While complex network theory has been widely applied to analyze transit systems, limited attention has been paid to behavioral segmentation within such networks. This study introduces a frequency-based framework that differentiates high-frequency (HF) and low-frequency (LF) passengers to examine how distinct user groups shape network structure, congestion vulnerability, and robustness. Using over 20 million smart-card records from Beijing&#8217;s multimodal transit system, we construct and analyze directed weighted networks for HF and LF users, integrating topological metrics, temporal comparisons, and community detection. Results reveal that HF networks are densely connected but structurally fragile, exhibiting lower modularity and significantly greater efficiency loss during peak periods. In contrast, LF networks are more spatially dispersed yet resilient, maintaining stronger intracommunity stability. Peak-hour simulation shows a 70% drop in efficiency and a 99% decrease in clustering, with HF networks experiencing higher vulnerability. Based on these findings, we propose differentiated policy strategies for each user group and outline a future optimization framework constrained by budget and equity considerations. This study contributes a scalable, data-driven approach to integrating passenger behavior with network science, offering actionable insights for resilient and inclusive transit planning.</description>
	<pubDate>2025-08-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 44: Optimizing Urban Mobility Through Complex Network Analysis and Big Data from Smart Cards</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/44">doi: 10.3390/iot6030044</a></p>
	<p>Authors:
		Li Sun
		Negin Ashrafi
		Maryam Pishgar
		</p>
	<p>Urban public transportation systems face increasing pressure from shifting travel patterns, rising peak-hour demand, and the need for equitable and resilient service delivery. While complex network theory has been widely applied to analyze transit systems, limited attention has been paid to behavioral segmentation within such networks. This study introduces a frequency-based framework that differentiates high-frequency (HF) and low-frequency (LF) passengers to examine how distinct user groups shape network structure, congestion vulnerability, and robustness. Using over 20 million smart-card records from Beijing&rsquo;s multimodal transit system, we construct and analyze directed weighted networks for HF and LF users, integrating topological metrics, temporal comparisons, and community detection. Results reveal that HF networks are densely connected but structurally fragile, exhibiting lower modularity and significantly greater efficiency loss during peak periods. In contrast, LF networks are more spatially dispersed yet resilient, maintaining stronger intracommunity stability. Peak-hour simulation shows a 70% drop in efficiency and a 99% decrease in clustering, with HF networks experiencing higher vulnerability. Based on these findings, we propose differentiated policy strategies for each user group and outline a future optimization framework constrained by budget and equity considerations. This study contributes a scalable, data-driven approach to integrating passenger behavior with network science, offering actionable insights for resilient and inclusive transit planning.</p>
	]]></content:encoded>

	<dc:title>Optimizing Urban Mobility Through Complex Network Analysis and Big Data from Smart Cards</dc:title>
			<dc:creator>Li Sun</dc:creator>
			<dc:creator>Negin Ashrafi</dc:creator>
			<dc:creator>Maryam Pishgar</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030044</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-08-06</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-08-06</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/iot6030044</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/43">

	<title>IoT, Vol. 6, Pages 43: The Role of IoT in Enhancing Sports Analytics: A Bibliometric Perspective</title>
	<link>https://www.mdpi.com/2624-831X/6/3/43</link>
	<description>The use of Internet of Things (IoT) for sports innovation has transformed the way athletes train, compete, and recover in any sports activity. This study performs a bibliometric analysis to examine research trends, collaborations, and publications in the realm of IoT and Sports. Our analysis included 780 Scopus articles and 150 WoS articles published during 2012&#8211;2025, and duplicates were removed. We analyzed and visualized the bibliometric data using R version 3.6.1, VOSviewer version 1.6.20, and the bibliometrix library. The study provides insights from a bibliometric analysis, showcasing the allocation of topics, scientific contributions, patterns of co-authorship, prominent authors and their productivity over time, notable terms, key sources, publications with citations, analysis of citations, source-specific citation analysis, yearly publication patterns, and the distribution of research papers. The results indicate that China and India have the leading scientific production in the development of IoT and Sports research, with prominent authors like Anton Umek, Anton Kos, and Emiliano Schena making significant contributions. Wearable technology and wearable sensors are the most trending topics in IoT and Sports, followed by medical sciences and artificial intelligence paradigms. The analysis also emphasizes the importance of open-access journals like &#8216;Journal of Physics: Conference Series&#8217; and &#8216;IEEE Access&#8217; for their contributions to IoT and Sports research. Future research directions focus on enhancing effective, lightweight, and efficient wearable devices while implementing technologies like edge computing and lightweight AI in wearable technologies.</description>
	<pubDate>2025-07-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 43: The Role of IoT in Enhancing Sports Analytics: A Bibliometric Perspective</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/43">doi: 10.3390/iot6030043</a></p>
	<p>Authors:
		Yuvanshankar Azhagumurugan
		Jawahar Sundaram
		Zenith Dewamuni
		Pritika
		Yakub Sebastian
		Bharanidharan Shanmugam
		</p>
	<p>The use of Internet of Things (IoT) for sports innovation has transformed the way athletes train, compete, and recover in any sports activity. This study performs a bibliometric analysis to examine research trends, collaborations, and publications in the realm of IoT and Sports. Our analysis included 780 Scopus articles and 150 WoS articles published during 2012&ndash;2025, and duplicates were removed. We analyzed and visualized the bibliometric data using R version 3.6.1, VOSviewer version 1.6.20, and the bibliometrix library. The study provides insights from a bibliometric analysis, showcasing the allocation of topics, scientific contributions, patterns of co-authorship, prominent authors and their productivity over time, notable terms, key sources, publications with citations, analysis of citations, source-specific citation analysis, yearly publication patterns, and the distribution of research papers. The results indicate that China and India have the leading scientific production in the development of IoT and Sports research, with prominent authors like Anton Umek, Anton Kos, and Emiliano Schena making significant contributions. Wearable technology and wearable sensors are the most trending topics in IoT and Sports, followed by medical sciences and artificial intelligence paradigms. The analysis also emphasizes the importance of open-access journals like &lsquo;Journal of Physics: Conference Series&rsquo; and &lsquo;IEEE Access&rsquo; for their contributions to IoT and Sports research. Future research directions focus on enhancing effective, lightweight, and efficient wearable devices while implementing technologies like edge computing and lightweight AI in wearable technologies.</p>
	]]></content:encoded>

	<dc:title>The Role of IoT in Enhancing Sports Analytics: A Bibliometric Perspective</dc:title>
			<dc:creator>Yuvanshankar Azhagumurugan</dc:creator>
			<dc:creator>Jawahar Sundaram</dc:creator>
			<dc:creator>Zenith Dewamuni</dc:creator>
			<dc:creator>Pritika</dc:creator>
			<dc:creator>Yakub Sebastian</dc:creator>
			<dc:creator>Bharanidharan Shanmugam</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030043</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-31</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/iot6030043</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/42">

	<title>IoT, Vol. 6, Pages 42: A No-Code Educational Platform for Introducing Internet of Things and Its Application to Agricultural Education</title>
	<link>https://www.mdpi.com/2624-831X/6/3/42</link>
	<description>This study introduces a no-code educational platform created to introduce Internet of Things (IoT) to university students who lack programming experience. The platform allows users to set IoT sensor nodes, and create a wireless sensor network through a simple graphical interface. Sensors&#8217; data can be sent to cloud services but they can also be stored locally, which makes our platform particularly realistic in fieldwork settings where internet access may be limited. The platform was tested in a pilot activity within a university course that previously covered IoT only in theory and was evaluated using the Technology Acceptance Model (TAM). Results showed strong student engagement and high ratings for ease of use, usefulness, and future use intent. These findings suggest that a no-code approach can effectively bridge the gap between IoT technologies and learners in non-engineering fields.</description>
	<pubDate>2025-07-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 42: A No-Code Educational Platform for Introducing Internet of Things and Its Application to Agricultural Education</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/42">doi: 10.3390/iot6030042</a></p>
	<p>Authors:
		George Lagogiannis
		Avraam Chatzopoulos
		</p>
	<p>This study introduces a no-code educational platform created to introduce Internet of Things (IoT) to university students who lack programming experience. The platform allows users to set IoT sensor nodes, and create a wireless sensor network through a simple graphical interface. Sensors&rsquo; data can be sent to cloud services but they can also be stored locally, which makes our platform particularly realistic in fieldwork settings where internet access may be limited. The platform was tested in a pilot activity within a university course that previously covered IoT only in theory and was evaluated using the Technology Acceptance Model (TAM). Results showed strong student engagement and high ratings for ease of use, usefulness, and future use intent. These findings suggest that a no-code approach can effectively bridge the gap between IoT technologies and learners in non-engineering fields.</p>
	]]></content:encoded>

	<dc:title>A No-Code Educational Platform for Introducing Internet of Things and Its Application to Agricultural Education</dc:title>
			<dc:creator>George Lagogiannis</dc:creator>
			<dc:creator>Avraam Chatzopoulos</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030042</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-31</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/iot6030042</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/41">

	<title>IoT, Vol. 6, Pages 41: Enhancing IoT Connectivity in Suburban and Rural Terrains Through Optimized Propagation Models Using Convolutional Neural Networks</title>
	<link>https://www.mdpi.com/2624-831X/6/3/41</link>
	<description>The widespread adoption of the Internet of Things (IoT) has driven major advancements in wireless communication, especially in rural and suburban areas where low population density and limited infrastructure pose significant challenges. Accurate Path Loss (PL) prediction is critical for the effective deployment and operation of Wireless Sensor Networks (WSNs) in such environments. This study explores the use of Convolutional Neural Networks (CNNs) for PL modeling, utilizing a comprehensive dataset collected in a smart campus setting that captures the influence of terrain and environmental variations. Several CNN architectures were evaluated based on different combinations of input features&#8212;such as distance, elevation, clutter height, and altitude&#8212;to assess their predictive accuracy. The findings reveal that CNN-based models outperform traditional propagation models (Free Space Path Loss (FSPL), Okumura&#8211;Hata, COST 231, Log-Distance), achieving lower error rates and more precise PL estimations. The best performing CNN configuration, using only distance and elevation, highlights the value of terrain-aware modeling. These results underscore the potential of deep learning techniques to enhance IoT connectivity in sparsely connected regions and support the development of more resilient communication infrastructures.</description>
	<pubDate>2025-07-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 41: Enhancing IoT Connectivity in Suburban and Rural Terrains Through Optimized Propagation Models Using Convolutional Neural Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/41">doi: 10.3390/iot6030041</a></p>
	<p>Authors:
		George Papastergiou
		Apostolos Xenakis
		Costas Chaikalis
		Dimitrios Kosmanos
		Menelaos Panagiotis Papastergiou
		</p>
	<p>The widespread adoption of the Internet of Things (IoT) has driven major advancements in wireless communication, especially in rural and suburban areas where low population density and limited infrastructure pose significant challenges. Accurate Path Loss (PL) prediction is critical for the effective deployment and operation of Wireless Sensor Networks (WSNs) in such environments. This study explores the use of Convolutional Neural Networks (CNNs) for PL modeling, utilizing a comprehensive dataset collected in a smart campus setting that captures the influence of terrain and environmental variations. Several CNN architectures were evaluated based on different combinations of input features&mdash;such as distance, elevation, clutter height, and altitude&mdash;to assess their predictive accuracy. The findings reveal that CNN-based models outperform traditional propagation models (Free Space Path Loss (FSPL), Okumura&ndash;Hata, COST 231, Log-Distance), achieving lower error rates and more precise PL estimations. The best performing CNN configuration, using only distance and elevation, highlights the value of terrain-aware modeling. These results underscore the potential of deep learning techniques to enhance IoT connectivity in sparsely connected regions and support the development of more resilient communication infrastructures.</p>
	]]></content:encoded>

	<dc:title>Enhancing IoT Connectivity in Suburban and Rural Terrains Through Optimized Propagation Models Using Convolutional Neural Networks</dc:title>
			<dc:creator>George Papastergiou</dc:creator>
			<dc:creator>Apostolos Xenakis</dc:creator>
			<dc:creator>Costas Chaikalis</dc:creator>
			<dc:creator>Dimitrios Kosmanos</dc:creator>
			<dc:creator>Menelaos Panagiotis Papastergiou</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030041</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-31</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-31</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/iot6030041</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/40">

	<title>IoT, Vol. 6, Pages 40: Evaluating the Energy Costs of SHA-256 and SHA-3 (KangarooTwelve) in Resource-Constrained IoT Devices</title>
	<link>https://www.mdpi.com/2624-831X/6/3/40</link>
	<description>The rapid expansion of Internet of Things (IoT) devices has heightened the demand for lightweight and secure cryptographic mechanisms suitable for resource-constrained environments. While SHA-256 remains a widely used standard, the emergence of SHA-3 particularly the KangarooTwelve variant offers potential benefits in flexibility and post-quantum resilience for lightweight resource-constrained devices. This paper presents a comparative evaluation of the energy costs associated with SHA-256 and SHA-3 hashing in Contiki 3.0, using three generationally distinct IoT platforms: Sky Mote, Z1 Mote, and Wismote. Unlike previous studies that rely on hardware acceleration or limited scope, our work conducts a uniform, software-only analysis across all motes, employing consistent radio duty cycling, ContikiMAC (a low-power Medium Access Control protocol) and isolating the cryptographic workload from network overhead. The empirical results from the Cooja simulator reveal that while SHA-3 provides advanced security features, it incurs significantly higher CPU and, in some cases, radio energy costs particularly on legacy hardware. However, modern platforms like Wismote demonstrate a more balanced trade-off, making SHA-3 viable in higher-capability deployments. These findings offer actionable guidance for designers of secure IoT systems, highlighting the practical implications of cryptographic selection in energy-sensitive environments.</description>
	<pubDate>2025-07-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 40: Evaluating the Energy Costs of SHA-256 and SHA-3 (KangarooTwelve) in Resource-Constrained IoT Devices</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/40">doi: 10.3390/iot6030040</a></p>
	<p>Authors:
		Iain Baird
		Isam Wadhaj
		Baraq Ghaleb
		Craig Thomson
		Gordon Russell
		</p>
	<p>The rapid expansion of Internet of Things (IoT) devices has heightened the demand for lightweight and secure cryptographic mechanisms suitable for resource-constrained environments. While SHA-256 remains a widely used standard, the emergence of SHA-3 particularly the KangarooTwelve variant offers potential benefits in flexibility and post-quantum resilience for lightweight resource-constrained devices. This paper presents a comparative evaluation of the energy costs associated with SHA-256 and SHA-3 hashing in Contiki 3.0, using three generationally distinct IoT platforms: Sky Mote, Z1 Mote, and Wismote. Unlike previous studies that rely on hardware acceleration or limited scope, our work conducts a uniform, software-only analysis across all motes, employing consistent radio duty cycling, ContikiMAC (a low-power Medium Access Control protocol) and isolating the cryptographic workload from network overhead. The empirical results from the Cooja simulator reveal that while SHA-3 provides advanced security features, it incurs significantly higher CPU and, in some cases, radio energy costs particularly on legacy hardware. However, modern platforms like Wismote demonstrate a more balanced trade-off, making SHA-3 viable in higher-capability deployments. These findings offer actionable guidance for designers of secure IoT systems, highlighting the practical implications of cryptographic selection in energy-sensitive environments.</p>
	]]></content:encoded>

	<dc:title>Evaluating the Energy Costs of SHA-256 and SHA-3 (KangarooTwelve) in Resource-Constrained IoT Devices</dc:title>
			<dc:creator>Iain Baird</dc:creator>
			<dc:creator>Isam Wadhaj</dc:creator>
			<dc:creator>Baraq Ghaleb</dc:creator>
			<dc:creator>Craig Thomson</dc:creator>
			<dc:creator>Gordon Russell</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030040</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-11</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-11</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/iot6030040</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/39">

	<title>IoT, Vol. 6, Pages 39: An Adaptive Holt&amp;ndash;Winters Model for Seasonal Forecasting of Internet of Things (IoT) Data Streams</title>
	<link>https://www.mdpi.com/2624-831X/6/3/39</link>
	<description>In various applications, IoT temporal data play a crucial role in accurately predicting future trends. Traditional models, including Rolling Window, SVR-RBF, and ARIMA, suffer from a potential accuracy decrease because they generally use all available data or the most recent data window during training, which can result in the inclusion of noisy data. To address this critical issue, this paper proposes a new forecasting technique called Adaptive Holt&amp;ndash;Winters (AHW). The AHW approach utilizes two models grounded in an exponential smoothing methodology. The first model is trained on the most current data window, whereas the second extracts information from a historical data segment exhibiting patterns most analogous to the present. The outputs of the two models are then combined, demonstrating enhanced prediction precision since the focus is on the relevant data patterns. The effectiveness of the AHW model is evaluated against well-known models (Rolling Window, SVR-RBF, ARIMA, LSTM, CNN, RNN, and Holt&amp;ndash;Winters), utilizing various metrics, such as RMSE, MAE, p-value, and time performance. A comprehensive evaluation covers various real-world datasets at different granularities (daily and monthly), including temperature from the National Climatic Data Center (NCDC), humidity and soil moisture measurements from the Basel City environmental system, and global intensity and global reactive power from the Individual Household Electric Power Consumption (IHEPC) dataset. The evaluation results demonstrate that AHW constantly attains higher forecasting accuracy across the tested datasets compared to other models. This indicates the efficacy of AHW in leveraging pertinent data patterns for enhanced predictive precision, offering a robust solution for temporal IoT data forecasting.</description>
	<pubDate>2025-07-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 39: An Adaptive Holt&ndash;Winters Model for Seasonal Forecasting of Internet of Things (IoT) Data Streams</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/39">doi: 10.3390/iot6030039</a></p>
	<p>Authors:
		Samer Sawalha
		Ghazi Al-Naymat
		</p>
	<p>In various applications, IoT temporal data play a crucial role in accurately predicting future trends. Traditional models, including Rolling Window, SVR-RBF, and ARIMA, suffer from a potential accuracy decrease because they generally use all available data or the most recent data window during training, which can result in the inclusion of noisy data. To address this critical issue, this paper proposes a new forecasting technique called Adaptive Holt&ndash;Winters (AHW). The AHW approach utilizes two models grounded in an exponential smoothing methodology. The first model is trained on the most current data window, whereas the second extracts information from a historical data segment exhibiting patterns most analogous to the present. The outputs of the two models are then combined, demonstrating enhanced prediction precision since the focus is on the relevant data patterns. The effectiveness of the AHW model is evaluated against well-known models (Rolling Window, SVR-RBF, ARIMA, LSTM, CNN, RNN, and Holt&ndash;Winters), utilizing various metrics, such as RMSE, MAE, p-value, and time performance. A comprehensive evaluation covers various real-world datasets at different granularities (daily and monthly), including temperature from the National Climatic Data Center (NCDC), humidity and soil moisture measurements from the Basel City environmental system, and global intensity and global reactive power from the Individual Household Electric Power Consumption (IHEPC) dataset. The evaluation results demonstrate that AHW constantly attains higher forecasting accuracy across the tested datasets compared to other models. This indicates the efficacy of AHW in leveraging pertinent data patterns for enhanced predictive precision, offering a robust solution for temporal IoT data forecasting.</p>
	]]></content:encoded>

	<dc:title>An Adaptive Holt&amp;ndash;Winters Model for Seasonal Forecasting of Internet of Things (IoT) Data Streams</dc:title>
			<dc:creator>Samer Sawalha</dc:creator>
			<dc:creator>Ghazi Al-Naymat</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030039</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-10</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-10</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/iot6030039</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/38">

	<title>IoT, Vol. 6, Pages 38: Secure and Efficient Video Management: A Novel Framework for CCTV Surveillance Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/3/38</link>
	<description>This paper presents a novel video encoding and decoding method aimed at enhancing security and reducing storage requirements, particularly for CCTV systems. The technique merges two video streams of matching frame dimensions into a single stream, optimizing disk space usage without compromising video quality. The combined video is secured using an advanced encryption standard (AES)-based shift algorithm that rearranges pixel positions, preventing unauthorized access. During decoding, the AES shift is reversed, enabling precise reconstruction of the original videos. This approach provides a space-efficient and secure solution for managing multiple video feeds while ensuring accurate recovery of the original content. The experimental results demonstrate that the transmission time for the encoded video is consistently shorter compared to transmitting the video streams separately. This, in turn, leads to about 54% reduction in energy consumption across diverse outdoor and indoor video datasets, highlighting significant improvements in both transmission efficiency and energy savings by our proposed scheme.</description>
	<pubDate>2025-07-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 38: Secure and Efficient Video Management: A Novel Framework for CCTV Surveillance Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/38">doi: 10.3390/iot6030038</a></p>
	<p>Authors:
		Swarnalatha Camalapuram Subramanyam
		Ansuman Bhattacharya
		Koushik Sinha
		</p>
	<p>This paper presents a novel video encoding and decoding method aimed at enhancing security and reducing storage requirements, particularly for CCTV systems. The technique merges two video streams of matching frame dimensions into a single stream, optimizing disk space usage without compromising video quality. The combined video is secured using an advanced encryption standard (AES)-based shift algorithm that rearranges pixel positions, preventing unauthorized access. During decoding, the AES shift is reversed, enabling precise reconstruction of the original videos. This approach provides a space-efficient and secure solution for managing multiple video feeds while ensuring accurate recovery of the original content. The experimental results demonstrate that the transmission time for the encoded video is consistently shorter compared to transmitting the video streams separately. This, in turn, leads to about 54% reduction in energy consumption across diverse outdoor and indoor video datasets, highlighting significant improvements in both transmission efficiency and energy savings by our proposed scheme.</p>
	]]></content:encoded>

	<dc:title>Secure and Efficient Video Management: A Novel Framework for CCTV Surveillance Systems</dc:title>
			<dc:creator>Swarnalatha Camalapuram Subramanyam</dc:creator>
			<dc:creator>Ansuman Bhattacharya</dc:creator>
			<dc:creator>Koushik Sinha</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030038</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-04</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/iot6030038</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/37">

	<title>IoT, Vol. 6, Pages 37: Using Blockchain Ledgers to Record AI Decisions in IoT</title>
	<link>https://www.mdpi.com/2624-831X/6/3/37</link>
	<description>The rapid integration of AI into IoT systems has outpaced the ability to explain and audit automated decisions, resulting in a serious transparency gap. We address this challenge by proposing a blockchain-based framework to create immutable audit trails of AI-driven IoT decisions. In our approach, each AI inference comprising key inputs, model ID, and output is logged to a permissioned blockchain ledger, ensuring that every decision is traceable and auditable. IoT devices and edge gateways submit cryptographically signed decision records via smart contracts, resulting in an immutable, timestamped log that is tamper-resistant. This decentralized approach guarantees non-repudiation and data integrity while balancing transparency with privacy (e.g., hashing personal data on-chain) to meet data protection norms. Our design aligns with emerging regulations, such as the EU AI Act&amp;rsquo;s logging mandate and GDPR&amp;rsquo;s transparency requirements. We demonstrate the framework&amp;rsquo;s applicability in two domains: healthcare IoT (logging diagnostic AI alerts for accountability) and industrial IoT (tracking autonomous control actions), showing its generalizability to high-stakes environments. Our contributions include the following: (1) a novel architecture for AI decision provenance in IoT, (2) a blockchain-based design to securely record AI decision-making processes, and (3) a simulation informed performance assessment based on projected metrics (throughput, latency, and storage) to assess the approach&amp;rsquo;s feasibility. By providing a reliable immutable audit trail for AI in IoT, our framework enhances transparency and trust in autonomous systems and offers a much-needed mechanism for auditable AI under increasing regulatory scrutiny.</description>
	<pubDate>2025-07-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 37: Using Blockchain Ledgers to Record AI Decisions in IoT</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/37">doi: 10.3390/iot6030037</a></p>
	<p>Authors:
		Vikram Kulothungan
		</p>
	<p>The rapid integration of AI into IoT systems has outpaced the ability to explain and audit automated decisions, resulting in a serious transparency gap. We address this challenge by proposing a blockchain-based framework to create immutable audit trails of AI-driven IoT decisions. In our approach, each AI inference comprising key inputs, model ID, and output is logged to a permissioned blockchain ledger, ensuring that every decision is traceable and auditable. IoT devices and edge gateways submit cryptographically signed decision records via smart contracts, resulting in an immutable, timestamped log that is tamper-resistant. This decentralized approach guarantees non-repudiation and data integrity while balancing transparency with privacy (e.g., hashing personal data on-chain) to meet data protection norms. Our design aligns with emerging regulations, such as the EU AI Act&rsquo;s logging mandate and GDPR&rsquo;s transparency requirements. We demonstrate the framework&rsquo;s applicability in two domains: healthcare IoT (logging diagnostic AI alerts for accountability) and industrial IoT (tracking autonomous control actions), showing its generalizability to high-stakes environments. Our contributions include the following: (1) a novel architecture for AI decision provenance in IoT, (2) a blockchain-based design to securely record AI decision-making processes, and (3) a simulation informed performance assessment based on projected metrics (throughput, latency, and storage) to assess the approach&rsquo;s feasibility. By providing a reliable immutable audit trail for AI in IoT, our framework enhances transparency and trust in autonomous systems and offers a much-needed mechanism for auditable AI under increasing regulatory scrutiny.</p>
	]]></content:encoded>

	<dc:title>Using Blockchain Ledgers to Record AI Decisions in IoT</dc:title>
			<dc:creator>Vikram Kulothungan</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030037</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-07-03</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-07-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/iot6030037</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/36">

	<title>IoT, Vol. 6, Pages 36: Construction Worker Activity Recognition Using Deep Residual Convolutional Network Based on Fused IMU Sensor Data in Internet-of-Things Environment</title>
	<link>https://www.mdpi.com/2624-831X/6/3/36</link>
	<description>With the advent of Industry 4.0, sensor-based human activity recognition has become increasingly vital for improving worker safety, enhancing operational efficiency, and optimizing workflows in Internet-of-Things (IoT) environments. This study introduces a novel deep learning-based framework for construction worker activity recognition, employing a deep residual convolutional neural network (ResNet) architecture integrated with multi-sensor fusion techniques. The proposed system processes data from multiple inertial measurement unit sensors strategically positioned on workers&amp;rsquo; bodies to identify and classify construction-related activities accurately. A comprehensive pre-processing pipeline is implemented, incorporating Butterworth filtering for noise suppression, data normalization, and an adaptive sliding window mechanism for temporal segmentation. Experimental validation is conducted using the publicly available VTT-ConIoT dataset, which includes recordings of 16 construction activities performed by 13 participants in a controlled laboratory setting. The results demonstrate that the ResNet-based sensor fusion approach outperforms traditional single-sensor models and other deep learning methods. The system achieves classification accuracies of 97.32% for binary discrimination between recommended and non-recommended activities, 97.14% for categorizing six core task types, and 98.68% for detailed classification across sixteen individual activities. Optimal performance is consistently obtained with a 4-second window size, balancing recognition accuracy with computational efficiency. Although the hand-mounted sensor proved to be the most effective as a standalone unit, multi-sensor configurations delivered significantly higher accuracy, particularly in complex classification tasks. 
The proposed approach demonstrates strong potential for real-world applications, offering robust performance across diverse working conditions while maintaining computational feasibility for IoT deployment. This work advances the field of innovative construction by presenting a practical solution for real-time worker activity monitoring, which can be seamlessly integrated into existing IoT infrastructures to promote workplace safety, streamline construction processes, and support data-driven management decisions.</description>
	<pubDate>2025-06-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 36: Construction Worker Activity Recognition Using Deep Residual Convolutional Network Based on Fused IMU Sensor Data in Internet-of-Things Environment</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/36">doi: 10.3390/iot6030036</a></p>
	<p>Authors:
		Sakorn Mekruksavanich
		Anuchit Jitpattanakul
		</p>
	<p>With the advent of Industry 4.0, sensor-based human activity recognition has become increasingly vital for improving worker safety, enhancing operational efficiency, and optimizing workflows in Internet-of-Things (IoT) environments. This study introduces a novel deep learning-based framework for construction worker activity recognition, employing a deep residual convolutional neural network (ResNet) architecture integrated with multi-sensor fusion techniques. The proposed system processes data from multiple inertial measurement unit sensors strategically positioned on workers&rsquo; bodies to identify and classify construction-related activities accurately. A comprehensive pre-processing pipeline is implemented, incorporating Butterworth filtering for noise suppression, data normalization, and an adaptive sliding window mechanism for temporal segmentation. Experimental validation is conducted using the publicly available VTT-ConIoT dataset, which includes recordings of 16 construction activities performed by 13 participants in a controlled laboratory setting. The results demonstrate that the ResNet-based sensor fusion approach outperforms traditional single-sensor models and other deep learning methods. The system achieves classification accuracies of 97.32% for binary discrimination between recommended and non-recommended activities, 97.14% for categorizing six core task types, and 98.68% for detailed classification across sixteen individual activities. Optimal performance is consistently obtained with a 4-second window size, balancing recognition accuracy with computational efficiency. Although the hand-mounted sensor proved to be the most effective as a standalone unit, multi-sensor configurations delivered significantly higher accuracy, particularly in complex classification tasks. 
The proposed approach demonstrates strong potential for real-world applications, offering robust performance across diverse working conditions while maintaining computational feasibility for IoT deployment. This work advances the field of innovative construction by presenting a practical solution for real-time worker activity monitoring, which can be seamlessly integrated into existing IoT infrastructures to promote workplace safety, streamline construction processes, and support data-driven management decisions.</p>
	]]></content:encoded>

	<dc:title>Construction Worker Activity Recognition Using Deep Residual Convolutional Network Based on Fused IMU Sensor Data in Internet-of-Things Environment</dc:title>
			<dc:creator>Sakorn Mekruksavanich</dc:creator>
			<dc:creator>Anuchit Jitpattanakul</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030036</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-06-28</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-06-28</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/iot6030036</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/35">

	<title>IoT, Vol. 6, Pages 35: Data-Bound Adaptive Federated Learning: FedAdaDB</title>
	<link>https://www.mdpi.com/2624-831X/6/3/35</link>
	<description>Federated Learning (FL) enables decentralized Machine Learning (ML), focusing on preserving data privacy, but faces a unique set of optimization challenges, such as dealing with non-IID data, communication overhead, and client drift. Adaptive optimizers like AdaGrad, Adam, and Adam variations have been applied in FL, showing good results in convergence speed and accuracy. However, it can be quite challenging to combine good convergence, model generalization, and stability in an FL setup. Data-bound adaptive methods like AdaDB have demonstrated promising results in centralized settings by incorporating dynamic, data-dependent bounds on Learning Rates (LRs). In this paper, FedAdaDB is introduced, which is an FL version of AdaDB aiming to address the aforementioned challenges. FedAdaDB uses the AdaDB optimizer at the server-side to dynamically adjust LR bounds based on the aggregated client updates. Extensive experiments have been conducted comparing FedAdaDB with FedAvg and FedAdam on three different datasets (EMNIST, CIFAR100, and Shakespeare). The results show that FedAdaDB consistently offers better and more robust outcomes, in terms of the measured final validation accuracy across all datasets, for a trade-off of a small delay in the convergence speed at an early stage.</description>
	<pubDate>2025-06-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 35: Data-Bound Adaptive Federated Learning: FedAdaDB</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/35">doi: 10.3390/iot6030035</a></p>
	<p>Authors:
		Fotios Zantalis
		Grigorios Koulouras
		</p>
	<p>Federated Learning (FL) enables decentralized Machine Learning (ML), focusing on preserving data privacy, but faces a unique set of optimization challenges, such as dealing with non-IID data, communication overhead, and client drift. Adaptive optimizers like AdaGrad, Adam, and Adam variations have been applied in FL, showing good results in convergence speed and accuracy. However, it can be quite challenging to combine good convergence, model generalization, and stability in an FL setup. Data-bound adaptive methods like AdaDB have demonstrated promising results in centralized settings by incorporating dynamic, data-dependent bounds on Learning Rates (LRs). In this paper, FedAdaDB is introduced, which is an FL version of AdaDB aiming to address the aforementioned challenges. FedAdaDB uses the AdaDB optimizer at the server-side to dynamically adjust LR bounds based on the aggregated client updates. Extensive experiments have been conducted comparing FedAdaDB with FedAvg and FedAdam on three different datasets (EMNIST, CIFAR100, and Shakespeare). The results show that FedAdaDB consistently offers better and more robust outcomes, in terms of the measured final validation accuracy across all datasets, for a trade-off of a small delay in the convergence speed at an early stage.</p>
	]]></content:encoded>

	<dc:title>Data-Bound Adaptive Federated Learning: FedAdaDB</dc:title>
			<dc:creator>Fotios Zantalis</dc:creator>
			<dc:creator>Grigorios Koulouras</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030035</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-06-24</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-06-24</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/iot6030035</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/3/34">

	<title>IoT, Vol. 6, Pages 34: MQTT Broker Architectural Enhancements for High-Performance P2P Messaging: TBMQ Scalability and Reliability in Distributed IoT Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/3/34</link>
	<description>The Message Queuing Telemetry Transport (MQTT) protocol remains a key enabler for lightweight and low-latency messaging in Internet of Things (IoT) applications. However, traditional broker implementations often struggle with the demands of large-scale point-to-point (P2P) communication. This paper presents a performance and architectural evaluation of TBMQ, an open source MQTT broker designed to support reliable P2P messaging at scale. The broker employs Redis Cluster for session persistence and Apache Kafka for message routing. Additional optimizations include asynchronous Redis access via Lettuce and Lua-based atomic operations. Stepwise load testing was performed using Kubernetes-based deployments on Amazon EKS, progressively increasing message rates to 1 million messages per second (msg/s). The results demonstrate that TBMQ achieves linear scalability and stable latency as the load increases. It reaches an average throughput of 8900 msg/s per CPU core, while maintaining end-to-end delivery latency within two-digit millisecond bounds. These findings confirm that TBMQ&amp;rsquo;s architecture provides an effective foundation for reliable, high-throughput messaging in distributed IoT systems.</description>
	<pubDate>2025-06-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 34: MQTT Broker Architectural Enhancements for High-Performance P2P Messaging: TBMQ Scalability and Reliability in Distributed IoT Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/3/34">doi: 10.3390/iot6030034</a></p>
	<p>Authors:
		Dmytro Shvaika
		Andrii Shvaika
		Volodymyr Artemchuk
		</p>
	<p>The Message Queuing Telemetry Transport (MQTT) protocol remains a key enabler for lightweight and low-latency messaging in Internet of Things (IoT) applications. However, traditional broker implementations often struggle with the demands of large-scale point-to-point (P2P) communication. This paper presents a performance and architectural evaluation of TBMQ, an open source MQTT broker designed to support reliable P2P messaging at scale. The broker employs Redis Cluster for session persistence and Apache Kafka for message routing. Additional optimizations include asynchronous Redis access via Lettuce and Lua-based atomic operations. Stepwise load testing was performed using Kubernetes-based deployments on Amazon EKS, progressively increasing message rates to 1 million messages per second (msg/s). The results demonstrate that TBMQ achieves linear scalability and stable latency as the load increases. It reaches an average throughput of 8900 msg/s per CPU core, while maintaining end-to-end delivery latency within two-digit millisecond bounds. These findings confirm that TBMQ&rsquo;s architecture provides an effective foundation for reliable, high-throughput messaging in distributed IoT systems.</p>
	]]></content:encoded>

	<dc:title>MQTT Broker Architectural Enhancements for High-Performance P2P Messaging: TBMQ Scalability and Reliability in Distributed IoT Systems</dc:title>
			<dc:creator>Dmytro Shvaika</dc:creator>
			<dc:creator>Andrii Shvaika</dc:creator>
			<dc:creator>Volodymyr Artemchuk</dc:creator>
		<dc:identifier>doi: 10.3390/iot6030034</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-06-23</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-06-23</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/iot6030034</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/3/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/33">

	<title>IoT, Vol. 6, Pages 33: Empowering Energy Transition: IoT-Driven Heat Pump Management for Optimal Thermal Comfort</title>
	<link>https://www.mdpi.com/2624-831X/6/2/33</link>
	<description>This paper analyzes the process of energy transition from traditional solid fuel heating to an air-to-air (A2A) heat pump-based heating system. Special emphasis was placed on the implementation of new technologies for improved management of energy systems, aiming to elevate both comfort levels and energy efficiency. This paper explores the use of the open-source software Home Assistant as an integration platform for home automation, designed to manage smart home devices while preserving local control, user privacy, and increasing cybersecurity. The proposed hardware platform includes a Raspberry Pi with appropriate IoT modules, providing a flexible and economically viable solution for household needs.</description>
	<pubDate>2025-06-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 33: Empowering Energy Transition: IoT-Driven Heat Pump Management for Optimal Thermal Comfort</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/33">doi: 10.3390/iot6020033</a></p>
	<p>Authors:
		Ivica Glavan
		Ivan Gospić
		Igor Poljak
		</p>
	<p>This paper analyzes the process of energy transition from traditional solid fuel heating to an air-to-air (A2A) heat pump-based heating system. Special emphasis was placed on the implementation of new technologies for improved management of energy systems, aiming to elevate both comfort levels and energy efficiency. This paper explores the use of the open-source software Home Assistant as an integration platform for home automation, designed to manage smart home devices while preserving local control, user privacy, and increasing cybersecurity. The proposed hardware platform includes a Raspberry Pi with appropriate IoT modules, providing a flexible and economically viable solution for household needs.</p>
	]]></content:encoded>

	<dc:title>Empowering Energy Transition: IoT-Driven Heat Pump Management for Optimal Thermal Comfort</dc:title>
			<dc:creator>Ivica Glavan</dc:creator>
			<dc:creator>Ivan Gospić</dc:creator>
			<dc:creator>Igor Poljak</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020033</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-06-17</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-06-17</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/iot6020033</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/32">

	<title>IoT, Vol. 6, Pages 32: Wireless Environmental Monitoring and Control in Poultry Houses: A Conceptual Study</title>
	<link>https://www.mdpi.com/2624-831X/6/2/32</link>
	<description>Modern commercial poultry farming typically occurs indoors, where partial or complete environmental control is employed to enhance production efficiency. Maintaining optimal conditions, such as temperature, relative humidity, carbon dioxide, and ammonia levels, is essential for ensuring bird comfort and maximizing productivity. Monitoring the conditions of poultry houses requires reliable and intelligent management systems. This study introduces a Wireless Monitoring and Control System developed to regulate environmental conditions within poultry facilities. The system continuously monitors key parameters via a network of distributed sensor nodes, which transmit data wirelessly to a centralized control unit using Wi-Fi. The control unit processes the incoming data, stores it in a database, and adjusts actuators accordingly to maintain ideal conditions. A web-based dashboard allows users to monitor and control the environment in real time. Field testing confirmed the system&amp;rsquo;s effectiveness in keeping conditions optimal, supporting poultry welfare and operational efficiency.</description>
	<pubDate>2025-06-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 32: Wireless Environmental Monitoring and Control in Poultry Houses: A Conceptual Study</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/32">doi: 10.3390/iot6020032</a></p>
	<p>Authors:
		António Godinho
		Romeu Vicente
		Sérgio Silva
		Paulo Jorge Coelho
		</p>
	<p>Modern commercial poultry farming typically occurs indoors, where partial or complete environmental control is employed to enhance production efficiency. Maintaining optimal conditions, such as temperature, relative humidity, carbon dioxide, and ammonia levels, is essential for ensuring bird comfort and maximizing productivity. Monitoring the conditions of poultry houses requires reliable and intelligent management systems. This study introduces a Wireless Monitoring and Control System developed to regulate environmental conditions within poultry facilities. The system continuously monitors key parameters via a network of distributed sensor nodes, which transmit data wirelessly to a centralized control unit using Wi-Fi. The control unit processes the incoming data, stores it in a database, and adjusts actuators accordingly to maintain ideal conditions. A web-based dashboard allows users to monitor and control the environment in real time. Field testing confirmed the system&rsquo;s effectiveness in keeping conditions optimal, supporting poultry welfare and operational efficiency.</p>
	]]></content:encoded>

	<dc:title>Wireless Environmental Monitoring and Control in Poultry Houses: A Conceptual Study</dc:title>
			<dc:creator>António Godinho</dc:creator>
			<dc:creator>Romeu Vicente</dc:creator>
			<dc:creator>Sérgio Silva</dc:creator>
			<dc:creator>Paulo Jorge Coelho</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020032</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-06-03</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-06-03</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/iot6020032</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/31">

	<title>IoT, Vol. 6, Pages 31: Raspberry Pi-Based Face Recognition Door Lock System</title>
	<link>https://www.mdpi.com/2624-831X/6/2/31</link>
	<description>Access control systems protect homes and businesses in the continually evolving security industry. This paper designs and implements a Raspberry Pi-based facial recognition door lock system using artificial intelligence and computer vision for reliability, efficiency, and usability. With the Raspberry Pi as its CPU, the system uses facial recognition for authentication. A camera module for real-time image capturing, a relay module for solenoid lock control, and OpenCV for image processing are essential. The system uses the DeepFace library to detect user emotions and adaptive learning to improve recognition accuracy for approved users. The device also adapts to poor lighting and distances, and it sends real-time remote monitoring messages. Some of the most important things that have been achieved include adaptive facial recognition, ensuring that the system changes as it is used, and integrating real-time notifications and emotion detection without any problems. Face recognition worked well in many settings. Modular architecture facilitated hardware&amp;ndash;software integration and scalability for various applications. In conclusion, this study created an intelligent facial recognition door lock system using Raspberry Pi hardware and open-source software libraries. The system addresses traditional access control limits and is practical, scalable, and inexpensive, demonstrating biometric technology&amp;rsquo;s potential in modern security systems.</description>
	<pubDate>2025-05-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 31: Raspberry Pi-Based Face Recognition Door Lock System</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/31">doi: 10.3390/iot6020031</a></p>
	<p>Authors:
		Seifeldin Sherif Fathy Ali Elnozahy
		Senthill C. Pari
		Lee Chu Liang
		</p>
	<p>Access control systems protect homes and businesses in the continually evolving security industry. This paper designs and implements a Raspberry Pi-based facial recognition door lock system using artificial intelligence and computer vision for reliability, efficiency, and usability. With the Raspberry Pi as its CPU, the system uses facial recognition for authentication. A camera module for real-time image capturing, a relay module for solenoid lock control, and OpenCV for image processing are essential. The system uses the DeepFace library to detect user emotions and adaptive learning to improve recognition accuracy for approved users. The device also adapts to poor lighting and distances, and it sends real-time remote monitoring messages. Some of the most important things that have been achieved include adaptive facial recognition, ensuring that the system changes as it is used, and integrating real-time notifications and emotion detection without any problems. Face recognition worked well in many settings. Modular architecture facilitated hardware&ndash;software integration and scalability for various applications. In conclusion, this study created an intelligent facial recognition door lock system using Raspberry Pi hardware and open-source software libraries. The system addresses traditional access control limits and is practical, scalable, and inexpensive, demonstrating biometric technology&rsquo;s potential in modern security systems.</p>
	]]></content:encoded>

	<dc:title>Raspberry Pi-Based Face Recognition Door Lock System</dc:title>
			<dc:creator>Seifeldin Sherif Fathy Ali Elnozahy</dc:creator>
			<dc:creator>Senthill C. Pari</dc:creator>
			<dc:creator>Lee Chu Liang</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020031</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-05-20</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-05-20</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/iot6020031</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/30">

	<title>IoT, Vol. 6, Pages 30: A Hybrid Learnable Fusion of ConvNeXt and Swin Transformer for Optimized Image Classification</title>
	<link>https://www.mdpi.com/2624-831X/6/2/30</link>
	<description>Medical image classification often relies on CNNs to capture local details (e.g., lesions, nodules) or on transformers to model long-range dependencies. However, each paradigm alone is limited in addressing both fine-grained structures and broader anatomical context. We propose ConvTransGFusion, a hybrid model that fuses ConvNeXt (for refined convolutional features) and Swin Transformer (for hierarchical global attention) using a learnable dual-attention gating mechanism. By aligning spatial dimensions, scaling each branch adaptively, and applying both channel and spatial attention, the proposed architecture bridges local and global representations, melding fine-grained lesion details with the broader anatomical context essential for accurate diagnosis. Tested on four diverse medical imaging datasets&amp;mdash;including X-ray, ultrasound, and MRI scans&amp;mdash;the proposed model consistently achieves superior accuracy, precision, recall, F1, and AUC over state-of-the-art CNNs and transformers. Our findings highlight the benefits of combining convolutional inductive biases and transformer-based global context in a single learnable framework, positioning ConvTransGFusion as a robust and versatile solution for real-world clinical applications.</description>
	<pubDate>2025-05-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 30: A Hybrid Learnable Fusion of ConvNeXt and Swin Transformer for Optimized Image Classification</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/30">doi: 10.3390/iot6020030</a></p>
	<p>Authors:
		Jaber Qezelbash-Chamak
		Karen Hicklin
		</p>
	<p>Medical image classification often relies on CNNs to capture local details (e.g., lesions, nodules) or on transformers to model long-range dependencies. However, each paradigm alone is limited in addressing both fine-grained structures and broader anatomical context. We propose ConvTransGFusion, a hybrid model that fuses ConvNeXt (for refined convolutional features) and Swin Transformer (for hierarchical global attention) using a learnable dual-attention gating mechanism. By aligning spatial dimensions, scaling each branch adaptively, and applying both channel and spatial attention, the proposed architecture bridges local and global representations, melding fine-grained lesion details with the broader anatomical context essential for accurate diagnosis. Tested on four diverse medical imaging datasets&mdash;including X-ray, ultrasound, and MRI scans&mdash;the proposed model consistently achieves superior accuracy, precision, recall, F1, and AUC over state-of-the-art CNNs and transformers. Our findings highlight the benefits of combining convolutional inductive biases and transformer-based global context in a single learnable framework, positioning ConvTransGFusion as a robust and versatile solution for real-world clinical applications.</p>
	]]></content:encoded>

	<dc:title>A Hybrid Learnable Fusion of ConvNeXt and Swin Transformer for Optimized Image Classification</dc:title>
			<dc:creator>Jaber Qezelbash-Chamak</dc:creator>
			<dc:creator>Karen Hicklin</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020030</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-05-16</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-05-16</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/iot6020030</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/29">

	<title>IoT, Vol. 6, Pages 29: Low-Memory-Footprint CNN-Based Biomedical Signal Processing for Wearable Devices</title>
	<link>https://www.mdpi.com/2624-831X/6/2/29</link>
	<description>The rise of wearable devices has enabled real-time processing of sensor data for critical health monitoring applications, such as human activity recognition (HAR) and cardiac disorder classification (CDC). However, the limited computational and memory resources of wearables necessitate lightweight yet accurate classification models. While deep neural networks (DNNs), including convolutional neural networks (CNNs) and long short-term memory networks, have shown high accuracy for HAR and CDC, their large parameter sizes hinder deployment on edge devices. On the other hand, various DNN compression techniques have been proposed, but exploiting the combination of various compression techniques with the aim of achieving memory efficient DNN models for HAR and CDC tasks remains under-investigated. This work studies the impact of CNN architecture parameters, focusing on the convolutional and dense layers, to identify configurations that balance accuracy and efficiency. We derive two versions of each model&amp;mdash;lean and fat&amp;mdash;based on their memory characteristics. Subsequently, we apply three complementary compression techniques: filter-based pruning, low-rank factorization, and dynamic range quantization. Experiments across three diverse DNNs demonstrate that this multi-faceted compression approach can significantly reduce memory and computational requirements while maintaining validation accuracy, leading to DNN models suitable for intelligent health monitoring on resource-constrained wearable devices.</description>
	<pubDate>2025-05-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 29: Low-Memory-Footprint CNN-Based Biomedical Signal Processing for Wearable Devices</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/29">doi: 10.3390/iot6020029</a></p>
	<p>Authors:
		Zahra Kokhazad
		Dimitrios Gkountelos
		Milad Kokhazadeh
		Charalampos Bournas
		Georgios Keramidas
		Vasilios Kelefouras
		</p>
	<p>The rise of wearable devices has enabled real-time processing of sensor data for critical health monitoring applications, such as human activity recognition (HAR) and cardiac disorder classification (CDC). However, the limited computational and memory resources of wearables necessitate lightweight yet accurate classification models. While deep neural networks (DNNs), including convolutional neural networks (CNNs) and long short-term memory networks, have shown high accuracy for HAR and CDC, their large parameter sizes hinder deployment on edge devices. On the other hand, various DNN compression techniques have been proposed, but exploiting the combination of various compression techniques with the aim of achieving memory efficient DNN models for HAR and CDC tasks remains under-investigated. This work studies the impact of CNN architecture parameters, focusing on the convolutional and dense layers, to identify configurations that balance accuracy and efficiency. We derive two versions of each model&mdash;lean and fat&mdash;based on their memory characteristics. Subsequently, we apply three complementary compression techniques: filter-based pruning, low-rank factorization, and dynamic range quantization. Experiments across three diverse DNNs demonstrate that this multi-faceted compression approach can significantly reduce memory and computational requirements while maintaining validation accuracy, leading to DNN models suitable for intelligent health monitoring on resource-constrained wearable devices.</p>
	]]></content:encoded>

	<dc:title>Low-Memory-Footprint CNN-Based Biomedical Signal Processing for Wearable Devices</dc:title>
			<dc:creator>Zahra Kokhazad</dc:creator>
			<dc:creator>Dimitrios Gkountelos</dc:creator>
			<dc:creator>Milad Kokhazadeh</dc:creator>
			<dc:creator>Charalampos Bournas</dc:creator>
			<dc:creator>Georgios Keramidas</dc:creator>
			<dc:creator>Vasilios Kelefouras</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020029</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-05-08</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-05-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/iot6020029</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/28">

	<title>IoT, Vol. 6, Pages 28: Power&amp;ndash;Packet Conversion Methods and Analysis of Scheduling Schemes for Wireless Power Transfer</title>
	<link>https://www.mdpi.com/2624-831X/6/2/28</link>
	<description>Recently, electromagnetic wireless power transfer (WPT) has emerged as a promising technology for supplying power to multiple terminals. Previous studies have devised packet transmission methods, commonly used in telecommunication, for power analysis. This study develops a simulator that calculates the received power by integrating a power&amp;ndash;packet conversion method, based on previous research. The simulator incorporates several scheduling functions to facilitate the investigation of the efficiency of the power-feeding methods. This study analyzes the efficacy of a first-come&amp;ndash;first-served (FCFS) method, a round-robin (RR) method, and a multilevel feedback queue (MFQ) scheme for wireless power transfer, all of which were devised based on existing scheduling methods used in operating systems. Simulation results show that, although the FCFS method is simple, it may lead to battery depletion due to delayed power supply, particularly in terminals with lower initial battery levels. The RR method improves fairness by allocating the power supply in time slices; however, its performance is sensitive to the slice duration. The MFQ method, which incorporates a promotion mechanism based on battery status and power demand, exhibits higher adaptability, achieving efficient and balanced power distribution even when terminals differ in distance from the transmitter or in power consumption. These evaluations were conducted using a proposed power&amp;ndash;packet conversion method that discretizes continuous power into packet units, allowing for the application of communication network-inspired scheduling and control techniques. The capacity to construct such models enables the simulator to analyze the flow and distribution of power, predict potential issues that may arise in real systems in advance, and devise optimal control methodologies. 
Moreover, the model can be employed to enhance the efficiency of power management systems and construct smart grids, and it is anticipated to be utilized for the integration of power and communication systems.</description>
	<pubDate>2025-05-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 28: Power&ndash;Packet Conversion Methods and Analysis of Scheduling Schemes for Wireless Power Transfer</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/28">doi: 10.3390/iot6020028</a></p>
	<p>Authors:
		Yuma Takahashi
		Takefumi Hiraguri
		Kazuki Maruta
		Shuma Okita
		Takahiro Matsuda
		Tomotaka Kimura
		Noboru Sekino
		</p>
	<p>Recently, electromagnetic wireless power transfer (WPT) has emerged as a promising technology for supplying power to multiple terminals. Previous studies have devised packet transmission methods, commonly used in telecommunication, for power analysis. This study develops a simulator that calculates the received power by integrating a power&ndash;packet conversion method, based on previous research. The simulator incorporates several scheduling functions to facilitate the investigation of the efficiency of the power-feeding methods. This study analyzes the efficacy of a first-come&ndash;first-served (FCFS) method, a round-robin (RR) method, and a multilevel feedback queue (MFQ) scheme for wireless power transfer, all of which were devised based on existing scheduling methods used in operating systems. Simulation results show that, although the FCFS method is simple, it may lead to battery depletion due to delayed power supply, particularly in terminals with lower initial battery levels. The RR method improves fairness by allocating the power supply in time slices; however, its performance is sensitive to the slice duration. The MFQ method, which incorporates a promotion mechanism based on battery status and power demand, exhibits higher adaptability, achieving efficient and balanced power distribution even when terminals differ in distance from the transmitter or in power consumption. These evaluations were conducted using a proposed power&ndash;packet conversion method that discretizes continuous power into packet units, allowing for the application of communication network-inspired scheduling and control techniques. The capacity to construct such models enables the simulator to analyze the flow and distribution of power, predict potential issues that may arise in real systems in advance, and devise optimal control methodologies. 
Moreover, the model can be employed to enhance the efficiency of power management systems and construct smart grids, and it is anticipated to be utilized for the integration of power and communication systems.</p>
	]]></content:encoded>

	<dc:title>Power&amp;ndash;Packet Conversion Methods and Analysis of Scheduling Schemes for Wireless Power Transfer</dc:title>
			<dc:creator>Yuma Takahashi</dc:creator>
			<dc:creator>Takefumi Hiraguri</dc:creator>
			<dc:creator>Kazuki Maruta</dc:creator>
			<dc:creator>Shuma Okita</dc:creator>
			<dc:creator>Takahiro Matsuda</dc:creator>
			<dc:creator>Tomotaka Kimura</dc:creator>
			<dc:creator>Noboru Sekino</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020028</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-05-08</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-05-08</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/iot6020028</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/27">

	<title>IoT, Vol. 6, Pages 27: Development of a Low-Cost Internet of Things Platform for Three-Phase Energy Monitoring in a University Campus</title>
	<link>https://www.mdpi.com/2624-831X/6/2/27</link>
	<description>This article highlights the development of a platform for monitoring three-phase energy consumption within a university campus. The core of this platform is low-cost IoT energy sensors, which are designed to transmit real-time data to the data center&amp;rsquo;s server through different IoT communication technologies, enhancing the preexisting electrical measurement network. The newly recommended measurement structure enables the electrical consumption data collection required for analyzing patterns and proposing forecast models to optimize electricity usage. The major contribution of this work is the design and implementation of smart three-phase energy meters based on the selection of various energy sensors and wireless communication technologies, and then the set up of a global IoT architecture that offers real-time data acquisition, storage, download, and visualization, capitalizing on the campus&amp;rsquo;s diverse energy profiles for detailed characterization. The proposed platform is considered the cornerstone toward the implementation of a collaborative smart microgrid, allowing forecasting and electrical consumption optimization, enabling research into potential opportunities for energy efficiency in our campus, and enhancing the performance of existing electrical infrastructure.</description>
	<pubDate>2025-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 27: Development of a Low-Cost Internet of Things Platform for Three-Phase Energy Monitoring in a University Campus</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/27">doi: 10.3390/iot6020027</a></p>
	<p>Authors:
		Abdessamad Rhesri
		Fatima Aabadi
		Rachid Bennani
		Yann Ben Maissa
		Ahmed Tamtaoui
		Hamza Dahmouni
		</p>
	<p>This article highlights the development of a platform for monitoring three-phase energy consumption within a university campus. The core of this platform is low-cost IoT energy sensors, which are designed to transmit real-time data to the data center&rsquo;s server through different IoT communication technologies, enhancing the preexisting electrical measurement network. The newly recommended measurement structure enables the electrical consumption data collection required for analyzing patterns and proposing forecast models to optimize electricity usage. The major contribution of this work is the design and implementation of smart three-phase energy meters based on the selection of various energy sensors and wireless communication technologies, and then the set up of a global IoT architecture that offers real-time data acquisition, storage, download, and visualization, capitalizing on the campus&rsquo;s diverse energy profiles for detailed characterization. The proposed platform is considered the cornerstone toward the implementation of a collaborative smart microgrid, allowing forecasting and electrical consumption optimization, enabling research into potential opportunities for energy efficiency in our campus, and enhancing the performance of existing electrical infrastructure.</p>
	]]></content:encoded>

	<dc:title>Development of a Low-Cost Internet of Things Platform for Three-Phase Energy Monitoring in a University Campus</dc:title>
			<dc:creator>Abdessamad Rhesri</dc:creator>
			<dc:creator>Fatima Aabadi</dc:creator>
			<dc:creator>Rachid Bennani</dc:creator>
			<dc:creator>Yann Ben Maissa</dc:creator>
			<dc:creator>Ahmed Tamtaoui</dc:creator>
			<dc:creator>Hamza Dahmouni</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020027</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-05-04</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-05-04</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/iot6020027</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/26">

	<title>IoT, Vol. 6, Pages 26: From Machine Learning-Based to LLM-Enhanced: An Application-Focused Analysis of How Social IoT Benefits from LLMs</title>
	<link>https://www.mdpi.com/2624-831X/6/2/26</link>
	<description>Recent advancements in large language models (LLMs) have added a transformative dimension to the social Internet of Things (SIoT), which is the combination of social networks and IoT. With LLMs&amp;rsquo; natural language understanding and data synthesis capabilities, LLMs are regarded as strong tools to enhance SIoT applications such as recommendation, search, and data management. This application-focused review synthesizes the latest related research by identifying both the synergies and the current research gaps at the intersection of LLMs and SIoT, as well as the evolutionary road from machine learning-based solutions to LLM-enhanced ones.</description>
	<pubDate>2025-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 26: From Machine Learning-Based to LLM-Enhanced: An Application-Focused Analysis of How Social IoT Benefits from LLMs</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/26">doi: 10.3390/iot6020026</a></p>
	<p>Authors:
		Lijie Yang
		Runbo Su
		</p>
	<p>Recent advancements in large language models (LLMs) have added a transformative dimension to the social Internet of Things (SIoT), which is the combination of social networks and IoT. With LLMs&rsquo; natural language understanding and data synthesis capabilities, LLMs are regarded as strong tools to enhance SIoT applications such as recommendation, search, and data management. This application-focused review synthesizes the latest related research by identifying both the synergies and the current research gaps at the intersection of LLMs and SIoT, as well as the evolutionary road from machine learning-based solutions to LLM-enhanced ones.</p>
	]]></content:encoded>

	<dc:title>From Machine Learning-Based to LLM-Enhanced: An Application-Focused Analysis of How Social IoT Benefits from LLMs</dc:title>
			<dc:creator>Lijie Yang</dc:creator>
			<dc:creator>Runbo Su</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020026</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-04-30</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-04-30</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/iot6020026</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/25">

	<title>IoT, Vol. 6, Pages 25: Blockchain-Based Mobile IoT System with Configurable Sensor Modules</title>
	<link>https://www.mdpi.com/2624-831X/6/2/25</link>
	<description>In this study, a Multi-Sensor IoT Device (MSID) is developed that is designed to collect various environmental data and interconnect with the cloud and blockchain to ensure reliable data management. The MSID is designed with a flexible, modular structure that supports a variety of sensor configurations and is easily expandable with 3D-printed components. The system performance was monitored in real-time, with a high cloud upload success rate of 98.35% and an average transmission delay of only 0.64 s, confirming stable data collection every minute. Blockchain-based sensor data storage ensured data integrity and tamper-proofness, with all transactions successfully recorded and verified via smart contract. The proposed Blockchain-based Mobile IoT System (BMIS) has shown strong potential for use in environmental monitoring, industrial asset management, and other areas that require reliable data collection and long-term preservation.</description>
	<pubDate>2025-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 25: Blockchain-Based Mobile IoT System with Configurable Sensor Modules</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/25">doi: 10.3390/iot6020025</a></p>
	<p>Authors:
		Jooho Lee
		Jihyun Byun
		Sangoh Kim
		</p>
	<p>In this study, a Multi-Sensor IoT Device (MSID) is developed that is designed to collect various environmental data and interconnect with the cloud and blockchain to ensure reliable data management. The MSID is designed with a flexible, modular structure that supports a variety of sensor configurations and is easily expandable with 3D-printed components. The system performance was monitored in real-time, with a high cloud upload success rate of 98.35% and an average transmission delay of only 0.64 s, confirming stable data collection every minute. Blockchain-based sensor data storage ensured data integrity and tamper-proofness, with all transactions successfully recorded and verified via smart contract. The proposed Blockchain-based Mobile IoT System (BMIS) has shown strong potential for use in environmental monitoring, industrial asset management, and other areas that require reliable data collection and long-term preservation.</p>
	]]></content:encoded>

	<dc:title>Blockchain-Based Mobile IoT System with Configurable Sensor Modules</dc:title>
			<dc:creator>Jooho Lee</dc:creator>
			<dc:creator>Jihyun Byun</dc:creator>
			<dc:creator>Sangoh Kim</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020025</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-04-22</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-04-22</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/iot6020025</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/24">

	<title>IoT, Vol. 6, Pages 24: Optimizing Customer Experience by Exploiting Real-Time Data Generated by IoT and Leveraging Distributed Web Systems in CRM Systems</title>
	<link>https://www.mdpi.com/2624-831X/6/2/24</link>
	<description>Integrating smart devices from the Internet of Things (IoT) with Customer Relationship Management (CRM) systems presents significant opportunities for enhancing customer experience through real-time data utilization. This article explores the technological frameworks and practical solutions for achieving seamless integration of IoT data within CRM platforms. By leveraging distributed Web systems, this study demonstrates how companies can improve scalability, responsiveness, and personalization in managing customer relationships. This paper outlines key architectural designs for distributed Web systems that ensure efficient real-time data processing while addressing challenges such as security, system integration, and the demands of analytics. This research provides insights into overcoming these challenges with strategies like load balancing, edge processing, and advanced encryption protocols. Results from simulations and practical implementations underscore the effectiveness of these approaches in optimizing operational efficiency and delivering hyper-personalized customer experiences. This study aims to bridge the gap between theoretical possibilities and real-world applications, offering actionable guidelines for organizations to fully leverage IoT-driven CRM systems.</description>
	<pubDate>2025-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 24: Optimizing Customer Experience by Exploiting Real-Time Data Generated by IoT and Leveraging Distributed Web Systems in CRM Systems</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/24">doi: 10.3390/iot6020024</a></p>
	<p>Authors:
		Marian Ileana
		Pavel Petrov
		Vassil Milev
		</p>
	<p>Integrating smart devices from the Internet of Things (IoT) with Customer Relationship Management (CRM) systems presents significant opportunities for enhancing customer experience through real-time data utilization. This article explores the technological frameworks and practical solutions for achieving seamless integration of IoT data within CRM platforms. By leveraging distributed Web systems, this study demonstrates how companies can improve scalability, responsiveness, and personalization in managing customer relationships. This paper outlines key architectural designs for distributed Web systems that ensure efficient real-time data processing while addressing challenges such as security, system integration, and the demands of analytics. This research provides insights into overcoming these challenges with strategies like load balancing, edge processing, and advanced encryption protocols. Results from simulations and practical implementations underscore the effectiveness of these approaches in optimizing operational efficiency and delivering hyper-personalized customer experiences. This study aims to bridge the gap between theoretical possibilities and real-world applications, offering actionable guidelines for organizations to fully leverage IoT-driven CRM systems.</p>
	]]></content:encoded>

	<dc:title>Optimizing Customer Experience by Exploiting Real-Time Data Generated by IoT and Leveraging Distributed Web Systems in CRM Systems</dc:title>
			<dc:creator>Marian Ileana</dc:creator>
			<dc:creator>Pavel Petrov</dc:creator>
			<dc:creator>Vassil Milev</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020024</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-04-21</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-04-21</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/iot6020024</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/23">

	<title>IoT, Vol. 6, Pages 23: A Lightweight Encryption Method for IoT-Based Healthcare Applications: A Review and Future Prospects</title>
	<link>https://www.mdpi.com/2624-831X/6/2/23</link>
	<description>The rapid proliferation of Internet of Things (IoT) devices in healthcare, from wearable sensors to implantable medical devices, has revolutionised patient monitoring, personalised treatment, and remote care delivery. However, the resource-constrained nature of IoT devices, coupled with the sensitivity of medical data, presents critical security challenges. Traditional encryption methods, while robust, are computationally intensive and unsuitable for IoT environments, leaving sensitive patient information vulnerable to cyber threats. Addressing this gap, lightweight encryption methods have emerged as a pivotal solution to balance security with the limited processing power, memory, and energy resources of IoT devices. This paper explores lightweight encryption methods tailored for IoT healthcare applications, evaluating their effectiveness in securing sensitive data while operating under resource constraints. A comparative analysis is conducted on encryption techniques such as AES-128, LEA, Ascon, GIFT, HIGHT, PRINCE, and RC5-32/12/16, based on key performance metrics including block size, key size, encryption and decryption speeds, throughput, and security levels. The findings highlight that AES-128, LEA, ASCON, and GIFT are best suited for high-sensitivity healthcare data due to their strong security features, while HIGHT and PRINCE provide balanced protection for medium-sensitivity applications. RC5-32/12/16, on the other hand, prioritises efficiency over comprehensive security, making it suitable for low-risk scenarios where computational overhead must be minimised. The paper underscores the significant trade-offs between efficiency, security, and resource consumption, emphasising the need for careful selection of encryption methods based on the specific requirements of IoT healthcare environments. 
Additionally, the paper highlights the growing demand for lightweight encryption methods that balance energy efficiency with robust protection against cyber threats. These insights offer valuable guidance for researchers and practitioners seeking to enhance the security of IoT-based healthcare systems while ensuring optimal performance in resource-constrained settings.</description>
	<pubDate>2025-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 23: A Lightweight Encryption Method for IoT-Based Healthcare Applications: A Review and Future Prospects</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/23">doi: 10.3390/iot6020023</a></p>
	<p>Authors:
		Omar Sabri
		Bassam Al-Shargabi
		Abdelrahman Abuarqoub
		Tahani Ali Hakami
		</p>
	<p>The rapid proliferation of Internet of Things (IoT) devices in healthcare, from wearable sensors to implantable medical devices, has revolutionised patient monitoring, personalised treatment, and remote care delivery. However, the resource-constrained nature of IoT devices, coupled with the sensitivity of medical data, presents critical security challenges. Traditional encryption methods, while robust, are computationally intensive and unsuitable for IoT environments, leaving sensitive patient information vulnerable to cyber threats. Addressing this gap, lightweight encryption methods have emerged as a pivotal solution to balance security with the limited processing power, memory, and energy resources of IoT devices. This paper explores lightweight encryption methods tailored for IoT healthcare applications, evaluating their effectiveness in securing sensitive data while operating under resource constraints. A comparative analysis is conducted on encryption techniques such as AES-128, LEA, Ascon, GIFT, HIGHT, PRINCE, and RC5-32/12/16, based on key performance metrics including block size, key size, encryption and decryption speeds, throughput, and security levels. The findings highlight that AES-128, LEA, ASCON, and GIFT are best suited for high-sensitivity healthcare data due to their strong security features, while HIGHT and PRINCE provide balanced protection for medium-sensitivity applications. RC5-32/12/16, on the other hand, prioritises efficiency over comprehensive security, making it suitable for low-risk scenarios where computational overhead must be minimised. The paper underscores the significant trade-offs between efficiency, security, and resource consumption, emphasising the need for careful selection of encryption methods based on the specific requirements of IoT healthcare environments. 
Additionally, the paper highlights the growing demand for lightweight encryption methods that balance energy efficiency with robust protection against cyber threats. These insights offer valuable guidance for researchers and practitioners seeking to enhance the security of IoT-based healthcare systems while ensuring optimal performance in resource-constrained settings.</p>
	]]></content:encoded>

	<dc:title>A Lightweight Encryption Method for IoT-Based Healthcare Applications: A Review and Future Prospects</dc:title>
			<dc:creator>Omar Sabri</dc:creator>
			<dc:creator>Bassam Al-Shargabi</dc:creator>
			<dc:creator>Abdelrahman Abuarqoub</dc:creator>
			<dc:creator>Tahani Ali Hakami</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020023</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-04-20</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-04-20</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/iot6020023</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/22">

	<title>IoT, Vol. 6, Pages 22: Text Mining and Unsupervised Deep Learning for Intrusion Detection in Smart-Grid Communication Networks</title>
	<link>https://www.mdpi.com/2624-831X/6/2/22</link>
	<description>The Manufacturing Message Specification (MMS) protocol is frequently used to automate processes in IEC 61850-based substations and smart-grid systems. However, it may be susceptible to a variety of cyber-attacks. A frequently used protection strategy is to deploy intrusion detection systems to monitor network traffic for anomalies. Conventional approaches to detecting anomalies require a large number of labeled samples and are therefore incompatible with high-dimensional time series data. This work proposes an anomaly detection method for high-dimensional sequences based on a bidirectional LSTM autoencoder. Additionally, a text-mining strategy based on a TF-IDF vectorizer and truncated SVD is presented for data preparation and feature extraction. The proposed data representation approach outperformed word embeddings (Doc2Vec) by better preserving critical domain-specific keywords in MMS traffic while reducing the complexity of model training. Unlike embeddings, which attempt to capture semantic relationships that may not exist in structured network protocols, TF-IDF focuses on token frequency and importance, making it more suitable for anomaly detection in MMS communications. To address the limitations of existing approaches that rely on labeled samples, the proposed model learns the properties and patterns of a large number of normal samples in an unsupervised manner. The results demonstrate that the proposed approach can learn potential features from high-dimensional time series data while maintaining a high True Positive Rate.</description>
	<pubDate>2025-03-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 22: Text Mining and Unsupervised Deep Learning for Intrusion Detection in Smart-Grid Communication Networks</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/22">doi: 10.3390/iot6020022</a></p>
	<p>Authors:
		Joseph Azar
		Mohammed Al Saleh
		Raphaël Couturier
		Hassan Noura
		</p>
	<p>The Manufacturing Message Specification (MMS) protocol is frequently used to automate processes in IEC 61850-based substations and smart-grid systems. However, it may be susceptible to a variety of cyber-attacks. A frequently used protection strategy is to deploy intrusion detection systems to monitor network traffic for anomalies. Conventional approaches to detecting anomalies require a large number of labeled samples and are therefore incompatible with high-dimensional time series data. This work proposes an anomaly detection method for high-dimensional sequences based on a bidirectional LSTM autoencoder. Additionally, a text-mining strategy based on a TF-IDF vectorizer and truncated SVD is presented for data preparation and feature extraction. The proposed data representation approach outperformed word embeddings (Doc2Vec) by better preserving critical domain-specific keywords in MMS traffic while reducing the complexity of model training. Unlike embeddings, which attempt to capture semantic relationships that may not exist in structured network protocols, TF-IDF focuses on token frequency and importance, making it more suitable for anomaly detection in MMS communications. To address the limitations of existing approaches that rely on labeled samples, the proposed model learns the properties and patterns of a large number of normal samples in an unsupervised manner. The results demonstrate that the proposed approach can learn potential features from high-dimensional time series data while maintaining a high True Positive Rate.</p>
	]]></content:encoded>

	<dc:title>Text Mining and Unsupervised Deep Learning for Intrusion Detection in Smart-Grid Communication Networks</dc:title>
			<dc:creator>Joseph Azar</dc:creator>
			<dc:creator>Mohammed Al Saleh</dc:creator>
			<dc:creator>Raphaël Couturier</dc:creator>
			<dc:creator>Hassan Noura</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020022</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-03-26</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-03-26</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/iot6020022</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2624-831X/6/2/21">

	<title>IoT, Vol. 6, Pages 21: Driving Supply Chain Transformation with IoT and AI Integration: A Dual Approach Using Bibliometric Analysis and Topic Modeling</title>
	<link>https://www.mdpi.com/2624-831X/6/2/21</link>
	<description>The objective of this study is to conduct an analysis of the scientific literature on the application of the Internet of Things (IoT) and artificial intelligence (AI) in enhancing supply chain operations. This research applies a dual approach combining bibliometric analysis and topic modeling to explore both quantitative citation trends and qualitative thematic insights. By examining 810 qualified articles, published between 2011 and 2024, this research aims to identify the main topics, key authors, influential sources, and the most-cited articles within the literature. The study addresses critical research questions on the state of IoT and AI integration into supply chains and the role of these technologies in resolving digital supply chain management challenges. The convergence of IoT and AI holds immense potential to redefine supply chain management practices, improving productivity, visibility, and sustainability in interconnected global supply chains. This research not only highlights the continuous evolution of the supply chain field in light of Industry 4.0 technologies—such as machine learning, big data analytics, cloud computing, cyber–physical systems, and 5G networks—but also provides an updated overview of advanced IoT and AI technologies currently applied in supply chain operations, documenting their evolution from rudimentary stages to their current state of advancement.</description>
	<pubDate>2025-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>IoT, Vol. 6, Pages 21: Driving Supply Chain Transformation with IoT and AI Integration: A Dual Approach Using Bibliometric Analysis and Topic Modeling</b></p>
	<p>IoT <a href="https://www.mdpi.com/2624-831X/6/2/21">doi: 10.3390/iot6020021</a></p>
	<p>Authors:
		Jerifa Zaman
		Atefeh Shoomal
		Mohammad Jahanbakht
		Dervis Ozay
		</p>
	<p>The objective of this study is to conduct an analysis of the scientific literature on the application of the Internet of Things (IoT) and artificial intelligence (AI) in enhancing supply chain operations. This research applies a dual approach combining bibliometric analysis and topic modeling to explore both quantitative citation trends and qualitative thematic insights. By examining 810 qualified articles, published between 2011 and 2024, this research aims to identify the main topics, key authors, influential sources, and the most-cited articles within the literature. The study addresses critical research questions on the state of IoT and AI integration into supply chains and the role of these technologies in resolving digital supply chain management challenges. The convergence of IoT and AI holds immense potential to redefine supply chain management practices, improving productivity, visibility, and sustainability in interconnected global supply chains. This research not only highlights the continuous evolution of the supply chain field in light of Industry 4.0 technologies—such as machine learning, big data analytics, cloud computing, cyber–physical systems, and 5G networks—but also provides an updated overview of advanced IoT and AI technologies currently applied in supply chain operations, documenting their evolution from rudimentary stages to their current state of advancement.</p>
	]]></content:encoded>

	<dc:title>Driving Supply Chain Transformation with IoT and AI Integration: A Dual Approach Using Bibliometric Analysis and Topic Modeling</dc:title>
			<dc:creator>Jerifa Zaman</dc:creator>
			<dc:creator>Atefeh Shoomal</dc:creator>
			<dc:creator>Mohammad Jahanbakht</dc:creator>
			<dc:creator>Dervis Ozay</dc:creator>
		<dc:identifier>doi: 10.3390/iot6020021</dc:identifier>
	<dc:source>IoT</dc:source>
	<dc:date>2025-03-25</dc:date>

	<prism:publicationName>IoT</prism:publicationName>
	<prism:publicationDate>2025-03-25</prism:publicationDate>
	<prism:volume>6</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/iot6020021</prism:doi>
	<prism:url>https://www.mdpi.com/2624-831X/6/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
