<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/computers">
		<title>Computers</title>
		<description>Latest open access articles published in Computers at https://www.mdpi.com/journal/computers</description>
		<link>https://www.mdpi.com/journal/computers</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/computers"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
		<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021"/>
		<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/307" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/306" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/305" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/304" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/303" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/302" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/301" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/300" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/299" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/298" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/297" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/296" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/295" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/294" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/293" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/292" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/291" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/290" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/289" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/288" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/287" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/286" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/285" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/284" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/283" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/282" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/281" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/280" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/279" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/278" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/277" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/276" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/275" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/274" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/273" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/271" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/272" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/270" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/269" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/268" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/267" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/266" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/265" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/264" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/263" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/262" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/5/261" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/260" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/259" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/258" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/257" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/256" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/255" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/254" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/253" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/252" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/251" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/250" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/249" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/248" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/247" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/246" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/245" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/244" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/243" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/242" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/241" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/240" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/239" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/238" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/237" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/236" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/235" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/234" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/233" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/232" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/231" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/230" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/229" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/228" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/227" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/226" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/225" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/224" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/223" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/222" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/221" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/220" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/219" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/217" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/218" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/216" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/215" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/214" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/213" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/212" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/211" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/210" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/209" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2073-431X/15/4/208" />
			</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/307">

	<title>Computers, Vol. 15, Pages 307: Semantic Segmentation-Based Identification and Quantitative Analysis of Cross-Sectional Quality Features in Luzhou-Flavor Liquor Daqu</title>
	<link>https://www.mdpi.com/2073-431X/15/5/307</link>
	<description>The objective evaluation of Daqu cross-sectional quality is challenging due to its heterogeneous structure, small features, and low contrast. This study proposes a semantic-segmentation-based framework for the automated identification and quantitative analysis of Luzhou-flavor Daqu cross-sections. Four representative architectures&amp;mdash;including three convolutional neural network (CNN)-based models (U-Net, U-Net++, and U2-Net) and one Transformer-based model (SegFormer)&amp;mdash;were systematically benchmarked. To address severe class imbalance and enhance model robustness, a task-specific data augmentation pipeline was implemented. With these optimized augmentation strategies, the U2-Net model demonstrated the best performance, with a peak mean Intersection over Union (mIoU) of 87.54% and a Dice score of 98.30%. Based on the predicted masks, quantitative indicators such as plaque area ratio, pizhang thickness, and fissure length were precisely extracted. The proposed framework provides an objective and scalable solution for Daqu quality inspection, offering significant practical value for industrial scenarios involving complex materials and fine-grained defect patterns.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 307: Semantic Segmentation-Based Identification and Quantitative Analysis of Cross-Sectional Quality Features in Luzhou-Flavor Liquor Daqu</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/307">doi: 10.3390/computers15050307</a></p>
	<p>Authors:
		Zheli Song
		Yi Dong
		Chao Wang
		Xiu Zhang
		Aibao Sun
		Cuiping You
		Jian Mao
		Shuangping Liu
		</p>
	<p>The objective evaluation of Daqu cross-sectional quality is challenging due to its heterogeneous structure, small features, and low contrast. This study proposes a semantic-segmentation-based framework for the automated identification and quantitative analysis of Luzhou-flavor Daqu cross-sections. Four representative architectures&mdash;including three convolutional neural network (CNN)-based models (U-Net, U-Net++, and U2-Net) and one Transformer-based model (SegFormer)&mdash;were systematically benchmarked. To address severe class imbalance and enhance model robustness, a task-specific data augmentation pipeline was implemented. With these optimized augmentation strategies, the U2-Net model demonstrated the best performance, with a peak mean Intersection over Union (mIoU) of 87.54% and a Dice score of 98.30%. Based on the predicted masks, quantitative indicators such as plaque area ratio, pizhang thickness, and fissure length were precisely extracted. The proposed framework provides an objective and scalable solution for Daqu quality inspection, offering significant practical value for industrial scenarios involving complex materials and fine-grained defect patterns.</p>
	]]></content:encoded>

	<dc:title>Semantic Segmentation-Based Identification and Quantitative Analysis of Cross-Sectional Quality Features in Luzhou-Flavor Liquor Daqu</dc:title>
			<dc:creator>Zheli Song</dc:creator>
			<dc:creator>Yi Dong</dc:creator>
			<dc:creator>Chao Wang</dc:creator>
			<dc:creator>Xiu Zhang</dc:creator>
			<dc:creator>Aibao Sun</dc:creator>
			<dc:creator>Cuiping You</dc:creator>
			<dc:creator>Jian Mao</dc:creator>
			<dc:creator>Shuangping Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050307</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>307</prism:startingPage>
		<prism:doi>10.3390/computers15050307</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/307</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/306">

	<title>Computers, Vol. 15, Pages 306: Machine Learning Prediction Model and Interpretability Analysis of Depression Risk in Patients with Chronic Kidney Disease</title>
	<link>https://www.mdpi.com/2073-431X/15/5/306</link>
	<description>Patients with chronic kidney disease (CKD) frequently experience depressive symptoms, which substantially impair their quality of life. To facilitate the early identification of high-risk individuals, this study aimed to develop a predictive model for assessing depression risk among CKD patients. This study was based on data from the China Health and Retirement Longitudinal Study (CHARLS) 2018 wave, including 1777 middle-aged and elderly participants with self-reported CKD diagnosed by a physician. Depressive symptoms were assessed using the 10-item Center for Epidemiologic Studies Depression Scale (CES-D 10). A total of 29 variables were included, covering lifestyle factors, health status, comorbidities, and sociodemographic characteristics. The Elastic Net algorithm was employed to select 11 features with the highest predictive value. Seven machine learning models, including XGBoost and support vector machine (SVM), were compared, with CHARLS 2020 data used as a temporal validation set. In the multi-model comparison, XGBoost demonstrated discrimination performance comparable to logistic regression (LR), SVM, and multilayer perceptron (MLP) (DeLong test, p &amp;gt; 0.05). However, considering its superior calibration performance and ability to capture nonlinear interactions, XGBoost was selected as the final model. In the validation set, the model achieved an area under the curve (AUC) of 0.8017 and an accuracy of 72.39%. SHAP analysis further revealed the nonlinear effects of predictors, with life satisfaction, sleep duration, and self-rated health showing high contributions and negative associations with depression risk, whereas limitations in activities of daily living (ADL), physical pain, and digestive system diseases were significantly associated with an increased risk of depression. Overall, the risk of depression in CKD patients is influenced by multiple dimensions, including psychological cognition, quality of life, physical function, and social environment. The predictive model developed in this study may provide a valuable reference for the early screening of high-risk populations. However, its applicability to non-CKD populations requires further validation.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 306: Machine Learning Prediction Model and Interpretability Analysis of Depression Risk in Patients with Chronic Kidney Disease</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/306">doi: 10.3390/computers15050306</a></p>
	<p>Authors:
		Hongli Yan
		Xu Peng
		Shuang Geng
		Yueming Gao
		Junfeng Liao
		</p>
	<p>Patients with chronic kidney disease (CKD) frequently experience depressive symptoms, which substantially impair their quality of life. To facilitate the early identification of high-risk individuals, this study aimed to develop a predictive model for assessing depression risk among CKD patients. This study was based on data from the China Health and Retirement Longitudinal Study (CHARLS) 2018 wave, including 1777 middle-aged and elderly participants with self-reported CKD diagnosed by a physician. Depressive symptoms were assessed using the 10-item Center for Epidemiologic Studies Depression Scale (CES-D 10). A total of 29 variables were included, covering lifestyle factors, health status, comorbidities, and sociodemographic characteristics. The Elastic Net algorithm was employed to select 11 features with the highest predictive value. Seven machine learning models, including XGBoost and support vector machine (SVM), were compared, with CHARLS 2020 data used as a temporal validation set. In the multi-model comparison, XGBoost demonstrated discrimination performance comparable to logistic regression (LR), SVM, and multilayer perceptron (MLP) (DeLong test, p &gt; 0.05). However, considering its superior calibration performance and ability to capture nonlinear interactions, XGBoost was selected as the final model. In the validation set, the model achieved an area under the curve (AUC) of 0.8017 and an accuracy of 72.39%. SHAP analysis further revealed the nonlinear effects of predictors, with life satisfaction, sleep duration, and self-rated health showing high contributions and negative associations with depression risk, whereas limitations in activities of daily living (ADL), physical pain, and digestive system diseases were significantly associated with an increased risk of depression. Overall, the risk of depression in CKD patients is influenced by multiple dimensions, including psychological cognition, quality of life, physical function, and social environment. The predictive model developed in this study may provide a valuable reference for the early screening of high-risk populations. However, its applicability to non-CKD populations requires further validation.</p>
	]]></content:encoded>

	<dc:title>Machine Learning Prediction Model and Interpretability Analysis of Depression Risk in Patients with Chronic Kidney Disease</dc:title>
			<dc:creator>Hongli Yan</dc:creator>
			<dc:creator>Xu Peng</dc:creator>
			<dc:creator>Shuang Geng</dc:creator>
			<dc:creator>Yueming Gao</dc:creator>
			<dc:creator>Junfeng Liao</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050306</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>306</prism:startingPage>
		<prism:doi>10.3390/computers15050306</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/306</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/305">

	<title>Computers, Vol. 15, Pages 305: Knowledge Management in Manufacturing: Current Practices, Barriers, and Automation Potential for LLM-Supported Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/5/305</link>
	<description>Knowledge management (KM) is increasingly becoming a critical success factor in Germany&amp;rsquo;s manufacturing industry due to demographic change, the shortage of a skilled workforce, and the growing need for flexible and resilient production systems. This study contributes empirical evidence on current KM practices in manufacturing and derives practice-oriented design implications for future LLM-supported KM systems. Two consecutive survey rounds involving six companies in Survey 1 and five companies in Survey 2 were conducted in order to identify current KM practices, recurring barriers, and design implications for large language model (LLM)-supported KM. The results show that KM is perceived as highly relevant, but is implemented only incompletely in practice. Across both datasets, central themes such as fragmented documentation practices, reliance on interpersonal transfer of tacit knowledge and uneven integration of digital KM tools recur consistently. Based on the identified practices, the paper further derives areas in which LLMs may support or augment existing KM processes, particularly with regard to semantic retrieval, contextualization, onboarding, and the preservation of tacit knowledge. The findings also highlight that successful implementation of artificial intelligence (AI)-enabled KM in manufacturing will depend on technical feasibility, trust, usability, and organizational acceptance.</description>
	<pubDate>2026-05-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 305: Knowledge Management in Manufacturing: Current Practices, Barriers, and Automation Potential for LLM-Supported Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/305">doi: 10.3390/computers15050305</a></p>
	<p>Authors:
		Pius Finkel
		Peter Wurster
		</p>
	<p>Knowledge management (KM) is increasingly becoming a critical success factor in Germany&rsquo;s manufacturing industry due to demographic change, the shortage of a skilled workforce, and the growing need for flexible and resilient production systems. This study contributes empirical evidence on current KM practices in manufacturing and derives practice-oriented design implications for future LLM-supported KM systems. Two consecutive survey rounds involving six companies in Survey 1 and five companies in Survey 2 were conducted in order to identify current KM practices, recurring barriers, and design implications for large language model (LLM)-supported KM. The results show that KM is perceived as highly relevant, but is implemented only incompletely in practice. Across both datasets, central themes such as fragmented documentation practices, reliance on interpersonal transfer of tacit knowledge and uneven integration of digital KM tools recur consistently. Based on the identified practices, the paper further derives areas in which LLMs may support or augment existing KM processes, particularly with regard to semantic retrieval, contextualization, onboarding, and the preservation of tacit knowledge. The findings also highlight that successful implementation of artificial intelligence (AI)-enabled KM in manufacturing will depend on technical feasibility, trust, usability, and organizational acceptance.</p>
	]]></content:encoded>

	<dc:title>Knowledge Management in Manufacturing: Current Practices, Barriers, and Automation Potential for LLM-Supported Systems</dc:title>
			<dc:creator>Pius Finkel</dc:creator>
			<dc:creator>Peter Wurster</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050305</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-11</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-11</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>305</prism:startingPage>
		<prism:doi>10.3390/computers15050305</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/305</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/304">

	<title>Computers, Vol. 15, Pages 304: A Survey of Fault and Intrusion Tolerance Approaches for Scientific Workflow Scheduling in Cloud Computing</title>
	<link>https://www.mdpi.com/2073-431X/15/5/304</link>
	<description>To provide reliable services in the cloud, fault tolerance is perhaps the most important consideration. The inherent sensitivity to failure hampers cloud services&amp;rsquo; performance and reliability. As a result, fault tolerance becomes a required characteristic to maintain reliability, which is difficult to provide due to the dynamic architecture and complex inter-dependencies. To address the issues of cloud reliability, many fault-tolerant approaches have been developed in the literature. This paper presents a recent research survey that seeks to classify the various faults and intrusion tolerance architectures. Furthermore, it provides a thorough critical analysis of existing fault and intrusion tolerance, as well as combined approaches, aimed at enhancing the dependability, availability, and execution of cloud services. The report also includes a comparison of the studied systems&amp;rsquo; framework based on various essential criteria such as cost, makespan, reliability, security, resource utilization, energy consumption, and failure ratio. This study aims to comprehensively review this subject for researchers to draw insights from existing patterns in the literature and provide deeper perspectives into some of the challenging issues and prospects. This will enhance the development of highly resilient fault-tolerant and intrusion-resistive scheduling algorithms for current and future cloud applications.</description>
	<pubDate>2026-05-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 304: A Survey of Fault and Intrusion Tolerance Approaches for Scientific Workflow Scheduling in Cloud Computing</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/304">doi: 10.3390/computers15050304</a></p>
	<p>Authors:
		Mazen Farid
		Oluwatosin Ahmed Amodu
		Heng Siong Lim
		Jamil Abedalrahim Jamil Alsayaydeh
		Mohammed Fadhl Abdullah
		Faten A. Saif
		</p>
	<p>To provide reliable services in the cloud, fault tolerance is perhaps the most important consideration. The inherent sensitivity to failure hampers cloud services&rsquo; performance and reliability. As a result, fault tolerance becomes a required characteristic to maintain reliability, which is difficult to provide due to the dynamic architecture and complex inter-dependencies. To address the issues of cloud reliability, many fault-tolerant approaches have been developed in the literature. This paper presents a recent research survey that seeks to classify the various faults and intrusion tolerance architectures. Furthermore, it provides a thorough critical analysis of existing fault and intrusion tolerance, as well as combined approaches, aimed at enhancing the dependability, availability, and execution of cloud services. The report also includes a comparison of the studied systems&rsquo; framework based on various essential criteria such as cost, makespan, reliability, security, resource utilization, energy consumption, and failure ratio. This study aims to comprehensively review this subject for researchers to draw insights from existing patterns in the literature and provide deeper perspectives into some of the challenging issues and prospects. This will enhance the development of highly resilient fault-tolerant and intrusion-resistive scheduling algorithms for current and future cloud applications.</p>
	]]></content:encoded>

	<dc:title>A Survey of Fault and Intrusion Tolerance Approaches for Scientific Workflow Scheduling in Cloud Computing</dc:title>
			<dc:creator>Mazen Farid</dc:creator>
			<dc:creator>Oluwatosin Ahmed Amodu</dc:creator>
			<dc:creator>Heng Siong Lim</dc:creator>
			<dc:creator>Jamil Abedalrahim Jamil Alsayaydeh</dc:creator>
			<dc:creator>Mohammed Fadhl Abdullah</dc:creator>
			<dc:creator>Faten A. Saif</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050304</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>304</prism:startingPage>
		<prism:doi>10.3390/computers15050304</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/304</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/303">

	<title>Computers, Vol. 15, Pages 303: EEG Fatigue Judgment Method Based on Approximate Nearest Neighbor Search</title>
	<link>https://www.mdpi.com/2073-431X/15/5/303</link>
	<description>Fatigue seriously affects work efficiency and brings potential safety hazards, and electroencephalogram (EEG) serves as a valuable physiological indicator for fatigue monitoring, as it directly reflects underlying brain neural activity. A key characteristic in EEG fatigue research is that the feature spaces of pre-fatigue and post-fatigue EEG signals exhibit obvious spatial separation&amp;mdash;this separation is caused by significant changes in brain electrical activity when the human body transitions from a normal awake state to a fatigue state. Existing EEG-based fatigue judgment methods mostly focus on binary classification, which fails to fully leverage the inherent spatial separation characteristic of pre-fatigue and post-fatigue feature spaces, making it difficult to achieve simple, efficient, and accurate fatigue judgment. To address this problem, this paper proposes an EEG fatigue judgment method based on feature space spatial separation and Approximate Nearest Neighbor Search (ANNS). The 16-channel pre-fatigue (Group A) and post-fatigue (Group B) EEG signals acquired from seven subjects are segmented and subjected to feature extraction, projecting the signals into a unified feature space. An ANNS index is constructed using feature vectors from both Group A and Group B, with each vector annotated by its corresponding class label. A separate test set (Group C) is utilized, and the k-nearest neighbors of each test feature vector are retrieved from the built ANNS index. The mental fatigue state is then identified via majority voting according to the class labels of the k-nearest neighbors. Experimental results demonstrate that the proposed method can effectively exploit the spatial separation between pre-fatigue and post-fatigue feature distributions, yielding an average single-subject classification accuracy of approximately 90%.</description>
	<pubDate>2026-05-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 303: EEG Fatigue Judgment Method Based on Approximate Nearest Neighbor Search</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/303">doi: 10.3390/computers15050303</a></p>
	<p>Authors:
		Yingjie Cui
		Xu Li
		Zhongxian Chen
		Yan Li
		</p>
	<p>Fatigue seriously affects work efficiency and brings potential safety hazards, and electroencephalogram (EEG) serves as a valuable physiological indicator for fatigue monitoring, as it directly reflects underlying brain neural activity. A key characteristic in EEG fatigue research is that the feature spaces of pre-fatigue and post-fatigue EEG signals exhibit obvious spatial separation&mdash;this separation is caused by significant changes in brain electrical activity when the human body transitions from a normal awake state to a fatigue state. Existing EEG-based fatigue judgment methods mostly focus on binary classification, which fails to fully leverage the inherent spatial separation characteristic of pre-fatigue and post-fatigue feature spaces, making it difficult to achieve simple, efficient, and accurate fatigue judgment. To address this problem, this paper proposes an EEG fatigue judgment method based on feature space spatial separation and Approximate Nearest Neighbor Search (ANNS). The 16-channel pre-fatigue (Group A) and post-fatigue (Group B) EEG signals acquired from seven subjects are segmented and subjected to feature extraction, projecting the signals into a unified feature space. An ANNS index is constructed using feature vectors from both Group A and Group B, with each vector annotated by its corresponding class label. A separate test set (Group C) is utilized, and the k-nearest neighbors of each test feature vector are retrieved from the built ANNS index. The mental fatigue state is then identified via majority voting according to the class labels of the k-nearest neighbors. Experimental results demonstrate that the proposed method can effectively exploit the spatial separation between pre-fatigue and post-fatigue feature distributions, yielding an average single-subject classification accuracy of approximately 90%.</p>
	]]></content:encoded>

	<dc:title>EEG Fatigue Judgment Method Based on Approximate Nearest Neighbor Search</dc:title>
			<dc:creator>Yingjie Cui</dc:creator>
			<dc:creator>Xu Li</dc:creator>
			<dc:creator>Zhongxian Chen</dc:creator>
			<dc:creator>Yan Li</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050303</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>303</prism:startingPage>
		<prism:doi>10.3390/computers15050303</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/303</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/302">

	<title>Computers, Vol. 15, Pages 302: An Efficient Quantum-Dot Cellular Automata Memory Architecture for Internet of Things Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/5/302</link>
	<description>Internet of Things (IoT) nodes continuously acquire, buffer, and transmit sensor data under strict constraints on area, latency, and energy consumption. However, conventional complementary metal&amp;ndash;oxide&amp;ndash;semiconductor (CMOS)-based memory-access circuits face increasing power loss, parasitic effects, interconnect complexity, and sensitivity to process variations at the nanoscale. To address these limitations, this paper proposes a quantum-dot cellular automata (QCA)-based decoder-driven static random-access memory (SRAM)-access architecture for compact and energy-efficient IoT perception-layer memory. The proposed framework integrates three main components: a majority-logic RAM cell with feedback-based storage and non-destructive readout, a compact 2 &amp;times; 4 decoder with enable and auxiliary asynchronous set/reset control, and a 1 &amp;times; 4 SRAM array in which the decoder is embedded to reduce routing and clocking overhead. The circuit layouts were implemented and functionally verified using QCADesigner 2.0.3, while the energy behavior was evaluated using QCADesigner-E. Simulation results confirm correct write/read (W/R) and address-selection behavior. The proposed 2 &amp;times; 4 decoder achieves 86 QCA cells, 0.08 &amp;micro;m2 occupied area, and one clocking unit, reducing cell count, area, and clocking by 48.19%, 50.00%, and 20.00%, respectively, compared with the best selected decoder baseline. The integrated 1 &amp;times; 4 SRAM array achieves 684 cells and 14 clocking units, improving timing by 30.00% compared with the closest SRAM-array baseline. These results demonstrate that the proposed QCA-based memory-access structure provides a compact and low-overhead solution for energy-constrained IoT communication systems.</description>
	<pubDate>2026-05-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 302: An Efficient Quantum-Dot Cellular Automata Memory Architecture for Internet of Things Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/302">doi: 10.3390/computers15050302</a></p>
	<p>Authors:
		B. S. Premananda
		Mohsen Vahabi
		Muhammad Zohaib
		Seyed-Sajad Ahmadpour
		M. Barath
		K. R. Sreesha
		</p>
	<p>Internet of Things (IoT) nodes continuously acquire, buffer, and transmit sensor data under strict constraints on area, latency, and energy consumption. However, conventional complementary metal&ndash;oxide&ndash;semiconductor (CMOS)-based memory-access circuits face increasing power loss, parasitic effects, interconnect complexity, and sensitivity to process variations at the nanoscale. To address these limitations, this paper proposes a quantum-dot cellular automata (QCA)-based decoder-driven static random-access memory (SRAM)-access architecture for compact and energy-efficient IoT perception-layer memory. The proposed framework integrates three main components: a majority-logic RAM cell with feedback-based storage and non-destructive readout, a compact 2 &times; 4 decoder with enable and auxiliary asynchronous set/reset control, and a 1 &times; 4 SRAM array in which the decoder is embedded to reduce routing and clocking overhead. The circuit layouts were implemented and functionally verified using QCADesigner 2.0.3, while the energy behavior was evaluated using QCADesigner-E. Simulation results confirm correct write/read (W/R) and address-selection behavior. The proposed 2 &times; 4 decoder achieves 86 QCA cells, 0.08 &micro;m2 occupied area, and one clocking unit, reducing cell count, area, and clocking by 48.19%, 50.00%, and 20.00%, respectively, compared with the best selected decoder baseline. The integrated 1 &times; 4 SRAM array achieves 684 cells and 14 clocking units, improving timing by 30.00% compared with the closest SRAM-array baseline. These results demonstrate that the proposed QCA-based memory-access structure provides a compact and low-overhead solution for energy-constrained IoT communication systems.</p>
	]]></content:encoded>

	<dc:title>An Efficient Quantum-Dot Cellular Automata Memory Architecture for Internet of Things Systems</dc:title>
			<dc:creator>B. S. Premananda</dc:creator>
			<dc:creator>Mohsen Vahabi</dc:creator>
			<dc:creator>Muhammad Zohaib</dc:creator>
			<dc:creator>Seyed-Sajad Ahmadpour</dc:creator>
			<dc:creator>M. Barath</dc:creator>
			<dc:creator>K. R. Sreesha</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050302</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>302</prism:startingPage>
		<prism:doi>10.3390/computers15050302</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/302</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/301">

	<title>Computers, Vol. 15, Pages 301: Quantifying the Impact of Signal Simplification, Data Quantity, and Task Difficulty on Vision Transformer Performance for ECG Rhythm Classification</title>
	<link>https://www.mdpi.com/2073-431X/15/5/301</link>
	<description>Vision transformers (ViTs) have demonstrated considerable promise for classifying electrocardiogram (ECG) rhythms. However, much of the existing research is conducted in highly controlled, data-sterile settings that fail to reflect the substantial variability present in real-world ECG signals. This paper seeks to address this gap by examining how signal simplification, data quantity, and task difficulty influence the performance of the SwinV2 ViT model in ECG rhythm classification. Through systematic analysis, we highlight that classifying highly abstracted signals yields only a limited impact on model performance, with all models achieving over 95% accuracy, while the amount of training data plays a crucial role with an almost 15% accuracy difference between the models trained on the most data and the least data. Finally, our analysis shows the model&amp;rsquo;s ability to effectively adapt to an increased class count, which is essential due to the varying nature of ECG diagnosis. In summary, these results highlight the importance of carefully balancing data clarity, dataset size, and diagnostic variety when designing ECG classification systems. Achieving this balance is crucial for building reliable and scalable AI solutions for cardiac assessment.</description>
	<pubDate>2026-05-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 301: Quantifying the Impact of Signal Simplification, Data Quantity, and Task Difficulty on Vision Transformer Performance for ECG Rhythm Classification</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/301">doi: 10.3390/computers15050301</a></p>
	<p>Authors:
		Jarod P. Hartley
		W. Joseph MacInnes
		</p>
	<p>Vision transformers (ViTs) have demonstrated considerable promise for classifying electrocardiogram (ECG) rhythms. However, much of the existing research is conducted in highly controlled, data-sterile settings that fail to reflect the substantial variability present in real-world ECG signals. This paper seeks to address this gap by examining how signal simplification, data quantity, and task difficulty influence the performance of the SwinV2 ViT model in ECG rhythm classification. Through systematic analysis, we highlight that classifying highly abstracted signals yields only a limited impact on model performance, with all models achieving over 95% accuracy, while the amount of training data plays a crucial role with an almost 15% accuracy difference between the models trained on the most data and the least data. Finally, our analysis shows the model&rsquo;s ability to effectively adapt to an increased class count, which is essential due to the varying nature of ECG diagnosis. In summary, these results highlight the importance of carefully balancing data clarity, dataset size, and diagnostic variety when designing ECG classification systems. Achieving this balance is crucial for building reliable and scalable AI solutions for cardiac assessment.</p>
	]]></content:encoded>

	<dc:title>Quantifying the Impact of Signal Simplification, Data Quantity, and Task Difficulty on Vision Transformer Performance for ECG Rhythm Classification</dc:title>
			<dc:creator>Jarod P. Hartley</dc:creator>
			<dc:creator>W. Joseph MacInnes</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050301</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>301</prism:startingPage>
		<prism:doi>10.3390/computers15050301</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/301</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/300">

	<title>Computers, Vol. 15, Pages 300: An Integrated Open-Source Software System for the Generation and Analysis of Subject-Specific Blood Flow Simulation Ensembles</title>
	<link>https://www.mdpi.com/2073-431X/15/5/300</link>
	<description>Hemodynamic analysis of blood flow is critical for diagnosing cardiovascular diseases and investigating cardiovascular parameters, such as aneurysms and wall shear stress. For subject-specific analyses, the anatomy and blood flow of the subject can be captured non-invasively using structural and 4D Magnetic Resonance Imaging (MRI), respectively. Computational fluid dynamics (CFD), on the other hand, can be used to generate blood flow simulations. To generate and analyze subject-specific blood flow simulations, MRI and CFD have to be brought together. We present an interactive, customizable, and user-oriented visual analysis tool that integrates measured data and CFD simulations. Thus, our open-source tool supports both medical and numerical analysis workflows. It enables the creation of simulation ensembles with a high variety of parameters. Furthermore, it allows for visual and analytical examination of simulations and measurements through 2D embeddings. To demonstrate the effectiveness of our tool, we applied it to three real-world use cases, showcasing its ability to configure simulation ensembles and analyze blood flow. We evaluated our example cases together with MRI and CFD experts. By combining the strengths of both CFD and MRI, our tool provides a comprehensive understanding of hemodynamic parameters, facilitating accurate analysis of hemodynamic biomarkers.</description>
	<pubDate>2026-05-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 300: An Integrated Open-Source Software System for the Generation and Analysis of Subject-Specific Blood Flow Simulation Ensembles</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/300">doi: 10.3390/computers15050300</a></p>
	<p>Authors:
		Simon Leistikow
		Thomas Miro
		Adrian Kummerländer
		Ali Nahardani
		Katja Grün
		Marcus Franz
		Verena Hoerr
		Mathias J. Krause
		Lars Linsen
		</p>
	<p>Hemodynamic analysis of blood flow is critical for diagnosing cardiovascular diseases and investigating cardiovascular parameters, such as aneurysms and wall shear stress. For subject-specific analyses, the anatomy and blood flow of the subject can be captured non-invasively using structural and 4D Magnetic Resonance Imaging (MRI), respectively. Computational fluid dynamics (CFD), on the other hand, can be used to generate blood flow simulations. To generate and analyze subject-specific blood flow simulations, MRI and CFD have to be brought together. We present an interactive, customizable, and user-oriented visual analysis tool that integrates measured data and CFD simulations. Thus, our open-source tool supports both medical and numerical analysis workflows. It enables the creation of simulation ensembles with a high variety of parameters. Furthermore, it allows for visual and analytical examination of simulations and measurements through 2D embeddings. To demonstrate the effectiveness of our tool, we applied it to three real-world use cases, showcasing its ability to configure simulation ensembles and analyze blood flow. We evaluated our example cases together with MRI and CFD experts. By combining the strengths of both CFD and MRI, our tool provides a comprehensive understanding of hemodynamic parameters, facilitating accurate analysis of hemodynamic biomarkers.</p>
	]]></content:encoded>

	<dc:title>An Integrated Open-Source Software System for the Generation and Analysis of Subject-Specific Blood Flow Simulation Ensembles</dc:title>
			<dc:creator>Simon Leistikow</dc:creator>
			<dc:creator>Thomas Miro</dc:creator>
			<dc:creator>Adrian Kummerländer</dc:creator>
			<dc:creator>Ali Nahardani</dc:creator>
			<dc:creator>Katja Grün</dc:creator>
			<dc:creator>Marcus Franz</dc:creator>
			<dc:creator>Verena Hoerr</dc:creator>
			<dc:creator>Mathias J. Krause</dc:creator>
			<dc:creator>Lars Linsen</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050300</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>300</prism:startingPage>
		<prism:doi>10.3390/computers15050300</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/300</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/299">

	<title>Computers, Vol. 15, Pages 299: ASL Recognition and Game-Based Interaction: A Machine Learning&amp;mdash;Driven, Gamified and Accessible Vocabulary Learning System for Deaf Learners</title>
	<link>https://www.mdpi.com/2073-431X/15/5/299</link>
	<description>Digital learning tools for American Sign Language (ASL) often lack the interactive depth necessary to engage learners effectively. This paper introduces a novel, browser-based word search game designed to facilitate ASL vocabulary familiarization through gamified interaction. The system employs a two-tier architecture consisting of a React-based frontend and a Flask-based backend. At its core, the application integrates a lightweight, skeleton-based Isolated Sign Language Recognition (ISLR) model, utilizing a Stacked Transformer-based Spatial-Temporal Attention Network to enable real-time webcam-based word entry during the configuration phase. This model, trained on the WLASL-100 dataset, achieves a Top-5 test accuracy of 88.48% with an average model inference latency of 141 ms, enabling real-time webcam input without proprietary hardware. Furthermore, we implement a constraint-satisfaction puzzle generation algorithm that achieves a 100% success rate in creating interlocked, multi-directional grids. Our results demonstrate that merging computer vision with pedagogical game mechanics provides an accessible, high-performance tool for the Deaf and Hard-of-Hearing (DHH) community, bridging the gap between static instruction and active linguistic practice.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 299: ASL Recognition and Game-Based Interaction: A Machine Learning&mdash;Driven, Gamified and Accessible Vocabulary Learning System for Deaf Learners</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/299">doi: 10.3390/computers15050299</a></p>
	<p>Authors:
		Stefanie Amiruzzaman
		Raga Mouni Batchu
		Md Amiruzzaman
		Linh Ngo
		M. Ali Akber Dewan
		</p>
	<p>Digital learning tools for American Sign Language (ASL) often lack the interactive depth necessary to engage learners effectively. This paper introduces a novel, browser-based word search game designed to facilitate ASL vocabulary familiarization through gamified interaction. The system employs a two-tier architecture consisting of a React-based frontend and a Flask-based backend. At its core, the application integrates a lightweight, skeleton-based Isolated Sign Language Recognition (ISLR) model, utilizing a Stacked Transformer-based Spatial-Temporal Attention Network to enable real-time webcam-based word entry during the configuration phase. This model, trained on the WLASL-100 dataset, achieves a Top-5 test accuracy of 88.48% with an average model inference latency of 141 ms, enabling real-time webcam input without proprietary hardware. Furthermore, we implement a constraint-satisfaction puzzle generation algorithm that achieves a 100% success rate in creating interlocked, multi-directional grids. Our results demonstrate that merging computer vision with pedagogical game mechanics provides an accessible, high-performance tool for the Deaf and Hard-of-Hearing (DHH) community, bridging the gap between static instruction and active linguistic practice.</p>
	]]></content:encoded>

	<dc:title>ASL Recognition and Game-Based Interaction: A Machine Learning&amp;mdash;Driven, Gamified and Accessible Vocabulary Learning System for Deaf Learners</dc:title>
			<dc:creator>Stefanie Amiruzzaman</dc:creator>
			<dc:creator>Raga Mouni Batchu</dc:creator>
			<dc:creator>Md Amiruzzaman</dc:creator>
			<dc:creator>Linh Ngo</dc:creator>
			<dc:creator>M. Ali Akber Dewan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050299</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>299</prism:startingPage>
		<prism:doi>10.3390/computers15050299</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/299</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/298">

	<title>Computers, Vol. 15, Pages 298: A Multi-Source Pipeline for Extracting Traditional-Style Chinese Melody Data from Symbolic Files and Score Images</title>
	<link>https://www.mdpi.com/2073-431X/15/5/298</link>
	<description>Large-scale symbolic melody datasets are essential for data-driven music information retrieval and generation, yet traditional-style Chinese melodies remain scattered across heterogeneous score formats and image sources. Existing extraction pipelines typically focus on single modalities&amp;mdash;either MIDI archives or standard staff notation&amp;mdash;and lack unified handling for numbered musical notation (Jianpu) and automated quality assurance. We propose the Multi-Source Melody Pipeline (MSMP), a systems-integration prototype whose front-end admits MIDI, MusicXML, Jianpu images, and staff images, and whose back-end converges on a standardized event-level representation; the present case study exercises the image branch&amp;mdash;in particular the Jianpu branch, through a Gemini-2.5-flash vision language model&amp;mdash;and treats the MIDI/MusicXML ingestion paths as architectural slots that are wired in but not experimentally validated in this submission. The system employs notation-aware routing to direct score images to appropriate backends (a VLM for Jianpu and rule-based OMR for staff) and enforces a structural validity gate (schema conformance plus at least one melodic track with at least one musical event) on every candidate segment. Validation on a 292-page representative prototype cohort yielded an 80.1% structural-acceptance rate&amp;mdash;explicitly not a transcription accuracy number&amp;mdash;and a newly added ground-truth benchmark on 50 manually annotated Jianpu pages reports 95.8% time-signature exact accuracy, 77.1% tonal-pitch-class key accuracy, 100% tempo agreement within &amp;plusmn;5 BPM, and, on a 10-page note-level subset, a mean first-16-note pitch F1 of 0.898 (octave-sensitive) with a Symbol Error Rate of 0.150. A companion 10-page K = 3 self-consistency audit indicates that metadata errors are systematic rather than stochastic. This work, therefore, contributes a reproducible integration architecture and a quantitative baseline on the Jianpu branch, rather than a new OMR algorithm, a new dataset release, or a fully benchmarked multi-format corpus; ongoing work addresses out-of-distribution classifier evaluation, comparison against dedicated Jianpu OMR baselines, and release of a copyright-cleared corpus.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 298: A Multi-Source Pipeline for Extracting Traditional-Style Chinese Melody Data from Symbolic Files and Score Images</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/298">doi: 10.3390/computers15050298</a></p>
	<p>Authors:
		Xuanfei Zhou
		Yinxuan Huang
		Sining Han
		Jiangyao Bai
		</p>
	<p>Large-scale symbolic melody datasets are essential for data-driven music information retrieval and generation, yet traditional-style Chinese melodies remain scattered across heterogeneous score formats and image sources. Existing extraction pipelines typically focus on single modalities&mdash;either MIDI archives or standard staff notation&mdash;and lack unified handling for numbered musical notation (Jianpu) and automated quality assurance. We propose the Multi-Source Melody Pipeline (MSMP), a systems-integration prototype whose front-end admits MIDI, MusicXML, Jianpu images, and staff images, and whose back-end converges on a standardized event-level representation; the present case study exercises the image branch&mdash;in particular the Jianpu branch, through a Gemini-2.5-flash vision language model&mdash;and treats the MIDI/MusicXML ingestion paths as architectural slots that are wired in but not experimentally validated in this submission. The system employs notation-aware routing to direct score images to appropriate backends (a VLM for Jianpu and rule-based OMR for staff) and enforces a structural validity gate (schema conformance plus at least one melodic track with at least one musical event) on every candidate segment. Validation on a 292-page representative prototype cohort yielded an 80.1% structural-acceptance rate&mdash;explicitly not a transcription accuracy number&mdash;and a newly added ground-truth benchmark on 50 manually annotated Jianpu pages reports 95.8% time-signature exact accuracy, 77.1% tonal-pitch-class key accuracy, 100% tempo agreement within &plusmn;5 BPM, and, on a 10-page note-level subset, a mean first-16-note pitch F1 of 0.898 (octave-sensitive) with a Symbol Error Rate of 0.150. A companion 10-page K = 3 self-consistency audit indicates that metadata errors are systematic rather than stochastic. This work, therefore, contributes a reproducible integration architecture and a quantitative baseline on the Jianpu branch, rather than a new OMR algorithm, a new dataset release, or a fully benchmarked multi-format corpus; ongoing work addresses out-of-distribution classifier evaluation, comparison against dedicated Jianpu OMR baselines, and release of a copyright-cleared corpus.</p>
	]]></content:encoded>

	<dc:title>A Multi-Source Pipeline for Extracting Traditional-Style Chinese Melody Data from Symbolic Files and Score Images</dc:title>
			<dc:creator>Xuanfei Zhou</dc:creator>
			<dc:creator>Yinxuan Huang</dc:creator>
			<dc:creator>Sining Han</dc:creator>
			<dc:creator>Jiangyao Bai</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050298</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>298</prism:startingPage>
		<prism:doi>10.3390/computers15050298</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/298</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/297">

	<title>Computers, Vol. 15, Pages 297: Integrating Thesaurus-Based Knowledge into Transformer Models for Semantic Understanding of Domain-Specific Texts</title>
	<link>https://www.mdpi.com/2073-431X/15/5/297</link>
	<description>Integrating structured linguistic resources into deep learning architectures represents a key challenge in domain-oriented NLP. This study proposes a framework for incorporating knowledge from a military thesaurus of the Ground Forces, structured according to the XML Zthes standard, into pre-trained transformer language models, including KazBERT, multilingual BERT, and XLM-RoBERTa. The approach addresses two interrelated tasks in specialized terminology processing: concept linking and semantic search. Unlike existing knowledge-injection methods designed primarily for general-domain applications, this framework formalizes the mapping of Zthes elements, such as Term, Broader term, Narrower term, Related term, ScopeNote, Language, and Source, into structured textual representations that can be directly processed by transformer architectures. Fine-tuning is conducted on a dataset of 18,400 training instances automatically generated from the thesaurus, including synonym pairs, hierarchical relations (hyperonymy and hyponymy), associative links, and definitional descriptions. Experimental evaluation demonstrated that thesaurus-enriched models outperform baseline architectures across all major metrics. The XLM-RoBERTa model achieves F1 = 0.84 and Top-5 accuracy = 0.94 in the concept linking task, representing a five-point improvement over the baseline. The model reaches Macro-F1 = 0.84 across four relation types. Results obtained on a specialized test set derived from terminology databases of Kazakhstan&amp;rsquo;s Armed Forces confirm robust cross-lingual generalization across Kazakh, Russian, and English military discourse.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 297: Integrating Thesaurus-Based Knowledge into Transformer Models for Semantic Understanding of Domain-Specific Texts</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/297">doi: 10.3390/computers15050297</a></p>
	<p>Authors:
		Bayangali Abdygalym
		Saule Tazhibayeva
		Madina Sambetbayeva
		Aigerim Yerimbetova
		Roman Taberkhan
		Manzura Abjalova
		Aidos Sabdenov
		Elmira Daiyrbayeva
		</p>
	<p>Integrating structured linguistic resources into deep learning architectures represents a key challenge in domain-oriented NLP. This study proposes a framework for incorporating knowledge from a military thesaurus of the Ground Forces, structured according to the XML Zthes standard, into pre-trained transformer language models, including KazBERT, multilingual BERT, and XLM-RoBERTa. The approach addresses two interrelated tasks in specialized terminology processing: concept linking and semantic search. Unlike existing knowledge-injection methods designed primarily for general-domain applications, this framework formalizes the mapping of Zthes elements, such as Term, Broader term, Narrower term, Related term, ScopeNote, Language, and Source, into structured textual representations that can be directly processed by transformer architectures. Fine-tuning is conducted on a dataset of 18,400 training instances automatically generated from the thesaurus, including synonym pairs, hierarchical relations (hyperonymy and hyponymy), associative links, and definitional descriptions. Experimental evaluation demonstrated that thesaurus-enriched models outperform baseline architectures across all major metrics. The XLM-RoBERTa model achieves F1 = 0.84 and Top-5 accuracy = 0.94 in the concept linking task, representing a five-point improvement over the baseline. The model reaches Macro-F1 = 0.84 across four relation types. Results obtained on a specialized test set derived from terminology databases of Kazakhstan&rsquo;s Armed Forces confirm robust cross-lingual generalization across Kazakh, Russian, and English military discourse.</p>
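	<p>To make the Zthes-to-text mapping concrete, here is an illustrative sketch (the field names, templates, and example entry are assumptions for this sketch, not the authors' formalization):</p>
	<pre><code>def zthes_to_texts(entry: dict) -> list[str]:
    """Flatten a Zthes-style thesaurus entry into textual training instances."""
    texts = []
    term = entry["term"]
    for broader in entry.get("broader", []):
        texts.append(f"{term} is a kind of {broader}")       # hyponymy
    for narrower in entry.get("narrower", []):
        texts.append(f"{narrower} is a kind of {term}")      # hyperonymy
    for related in entry.get("related", []):
        texts.append(f"{term} is associated with {related}") # associative link
    if "scope_note" in entry:
        texts.append(f"{term}: {entry['scope_note']}")       # definition
    return texts

entry = {"term": "howitzer", "broader": ["artillery"],
         "related": ["shell"], "scope_note": "A short-barrelled gun."}
print(zthes_to_texts(entry))
</code></pre>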
	]]></content:encoded>

	<dc:title>Integrating Thesaurus-Based Knowledge into Transformer Models for Semantic Understanding of Domain-Specific Texts</dc:title>
			<dc:creator>Bayangali Abdygalym</dc:creator>
			<dc:creator>Saule Tazhibayeva</dc:creator>
			<dc:creator>Madina Sambetbayeva</dc:creator>
			<dc:creator>Aigerim Yerimbetova</dc:creator>
			<dc:creator>Roman Taberkhan</dc:creator>
			<dc:creator>Manzura Abjalova</dc:creator>
			<dc:creator>Aidos Sabdenov</dc:creator>
			<dc:creator>Elmira Daiyrbayeva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050297</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>297</prism:startingPage>
		<prism:doi>10.3390/computers15050297</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/297</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/296">

	<title>Computers, Vol. 15, Pages 296: AI-Driven Clustering-Based Stratification of Allergic Patients Towards Smart Healthcare Systems in Southern Italy</title>
	<link>https://www.mdpi.com/2073-431X/15/5/296</link>
	<description>A clustering analysis was conducted to identify distinct patient subgroups using White Blood Cell (WBC) count alongside Age and Total Immunoglobulin E (IgE) biomarkers. All data were obtained from a coordinated primary care network operating in Apulia (Southern Italy). We analyzed 300 patient records, performed preprocessing and exploratory data analysis, and then applied unsupervised clustering directly to the standardized three-variable feature space (Age, WBC, and Total IgE), followed by supervised validation steps. Several algorithms were applied for clustering. Among the evaluated methods, K-means and Spectral Clustering showed the most favorable internal validation profiles, based on Silhouette Score (SS), Calinski&amp;ndash;Harabasz Index (CH), and Davies&amp;ndash;Bouldin Index (DB). K-means achieved the best scores (SS = 0.406, CH = 190.00, DB = 0.900), closely followed by Spectral Clustering (SS = 0.398, CH = 182.57, DB = 0.936), outperforming Agglomerative Clustering (SS = 0.361, CH = 160.41, DB = 1.016) and Gaussian Mixture Models (SS = 0.233, CH = 103.89, DB = 1.289). Post-clustering ANOVA analyses indicated significant differences in WBC, age, and total IgE across the five consensus clusters. Cluster internal separability was evaluated by training a Random Forest classifier to predict cluster membership. The results indicate internal cluster separability within the analyzed dataset, but further external verification and clinical evidence are necessary for validation. The research group established clinical descriptions, suggested treatment plans, and identified co-existing diseases to help validate the model-based findings. A simplified cluster-informed clinical summary based on biomarker ranges was derived to support interpretation of the identified patient profiles. This integrated method preliminarily suggests that patient strata may be identified from routine clinical variables, while highlighting the importance of internal validation and clinical interpretability in clustering research.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 296: AI-Driven Clustering-Based Stratification of Allergic Patients Towards Smart Healthcare Systems in Southern Italy</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/296">doi: 10.3390/computers15050296</a></p>
	<p>Authors:
		Stefano Palazzo
		Esra Hazar
		Arife Uslu Gokceoglu
		Giovanni Zambetta
		Roberto Caldelli
		Claudio Loconsole
		</p>
	<p>A clustering analysis was conducted to identify distinct patient subgroups using White Blood Cell (WBC) count alongside Age and Total Immunoglobulin E (IgE) biomarkers. All data were obtained from a coordinated primary care network operating in Apulia (Southern Italy). We analyzed 300 patient records, performed preprocessing and exploratory data analysis, and then applied unsupervised clustering directly to the standardized three-variable feature space (Age, WBC, and Total IgE), followed by supervised validation steps. Several algorithms were applied for clustering. Among the evaluated methods, K-means and Spectral Clustering showed the most favorable internal validation profiles, based on Silhouette Score (SS), Calinski&ndash;Harabasz Index (CH), and Davies&ndash;Bouldin Index (DB). K-means achieved the best scores (SS = 0.406, CH = 190.00, DB = 0.900), closely followed by Spectral Clustering (SS = 0.398, CH = 182.57, DB = 0.936), outperforming Agglomerative Clustering (SS = 0.361, CH = 160.41, DB = 1.016) and Gaussian Mixture Models (SS = 0.233, CH = 103.89, DB = 1.289). Post-clustering ANOVA analyses indicated significant differences in WBC, age, and total IgE across the five consensus clusters. Cluster internal separability was evaluated by training a Random Forest classifier to predict cluster membership. The results indicate internal cluster separability within the analyzed dataset, but further external verification and clinical evidence are necessary for validation. The research group established clinical descriptions, suggested treatment plans, and identified co-existing diseases to help validate the model-based findings. A simplified cluster-informed clinical summary based on biomarker ranges was derived to support interpretation of the identified patient profiles. This integrated method preliminarily suggests that patient strata may be identified from routine clinical variables, while highlighting the importance of internal validation and clinical interpretability in clustering research.</p>
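	<p>The internal-validation comparison can be reproduced in outline with scikit-learn; in the sketch below, synthetic data stand in for the (non-public) patient records:</p>
	<pre><code>import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (silhouette_score,
                             calinski_harabasz_score,
                             davies_bouldin_score)

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 3))          # stand-in for Age, WBC, Total IgE
X = StandardScaler().fit_transform(X)  # standardize the 3-variable space

labels = KMeans(n_clusters=5, n_init=10, random_state=0).fit_predict(X)
print("SS =", silhouette_score(X, labels))
print("CH =", calinski_harabasz_score(X, labels))
print("DB =", davies_bouldin_score(X, labels))
</code></pre>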
	]]></content:encoded>

	<dc:title>AI-Driven Clustering-Based Stratification of Allergic Patients Towards Smart Healthcare Systems in Southern Italy</dc:title>
			<dc:creator>Stefano Palazzo</dc:creator>
			<dc:creator>Esra Hazar</dc:creator>
			<dc:creator>Arife Uslu Gokceoglu</dc:creator>
			<dc:creator>Giovanni Zambetta</dc:creator>
			<dc:creator>Roberto Caldelli</dc:creator>
			<dc:creator>Claudio Loconsole</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050296</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>296</prism:startingPage>
		<prism:doi>10.3390/computers15050296</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/296</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/295">

	<title>Computers, Vol. 15, Pages 295: Editorial: Machine Learning and Statistical Learning with Applications 2025</title>
	<link>https://www.mdpi.com/2073-431X/15/5/295</link>
	<description>Machine learning and statistical learning have become central to modern scientific discovery and technological innovation [...]</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 295: Editorial: Machine Learning and Statistical Learning with Applications 2025</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/295">doi: 10.3390/computers15050295</a></p>
	<p>Authors:
		Yan Zhang
		</p>
	<p>Machine learning and statistical learning have become central to modern scientific discovery and technological innovation [...]</p>
	]]></content:encoded>

	<dc:title>Editorial: Machine Learning and Statistical Learning with Applications 2025</dc:title>
			<dc:creator>Yan Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050295</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>295</prism:startingPage>
		<prism:doi>10.3390/computers15050295</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/295</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/294">

	<title>Computers, Vol. 15, Pages 294: A Self-Adaptive LLM-Based Framework for Automated Extraction and Structuring of Earthquake Information from Heterogeneous Web Sources</title>
	<link>https://www.mdpi.com/2073-431X/15/5/294</link>
	<description>The rapid growth of heterogeneous web sources has created significant challenges for the automated extraction and structuring of critical domain-specific information, particularly in real-time seismic monitoring scenarios. Despite the existence of official governmental reporting systems, relevant earthquake-related data are often distributed across diverse online platforms with highly variable and dynamically evolving HTML (HyperText Markup Language) structures, leading to incomplete, delayed, or inconsistent information retrieval. Existing rule-based and semi-automated approaches lack scalability and robustness under such conditions. To address this gap, this study proposes a self-adaptive framework based on large language models (LLMs) for the automated extraction and structuring of earthquake-related web content. The proposed approach integrates transformer-based schema generation, repository-guided schema matching, and an iterative refinement mechanism, enabling the system to dynamically adapt to heterogeneous document structures. A formal utility-based decision mechanism is introduced to optimize schema selection and reuse, while embedding-based similarity modeling facilitates efficient transfer of extraction patterns across structurally related webpages. The experimental evaluation was conducted on a heterogeneous benchmark dataset comprising multiple web domains with diverse structural characteristics. The results demonstrate that the proposed framework achieves a success rate of 85% across all evaluated models, with the best-performing configuration reaching an extraction accuracy of 96.5% and a final composite score of 84.26. Additional analysis reveals significant improvements in extraction completeness, reduction in false positives and false negatives, and effective reuse of a compact set of robust schemas. Error analysis indicates that the primary challenges are associated with noisy HTML structures and incorrect DOM (Document Object Model) element selection, rather than deficiencies in textual content. The findings confirm that combining lightweight transformer models with adaptive memory and schema reuse mechanisms enables the development of scalable, robust, and high-performance web extraction systems. The proposed approach is particularly suitable for real-time information retrieval in safety-critical domains, where timely and accurate data aggregation from heterogeneous sources is essential.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 294: A Self-Adaptive LLM-Based Framework for Automated Extraction and Structuring of Earthquake Information from Heterogeneous Web Sources</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/294">doi: 10.3390/computers15050294</a></p>
	<p>Authors:
		Assem Turarbek
		Diana Rakhimova
		Yeldos Adetbekov
		Azat Nurgali
		</p>
	<p>The rapid growth of heterogeneous web sources has created significant challenges for the automated extraction and structuring of critical domain-specific information, particularly in real-time seismic monitoring scenarios. Despite the existence of official governmental reporting systems, relevant earthquake-related data are often distributed across diverse online platforms with highly variable and dynamically evolving HTML (HyperText Markup Language) structures, leading to incomplete, delayed, or inconsistent information retrieval. Existing rule-based and semi-automated approaches lack scalability and robustness under such conditions. To address this gap, this study proposes a self-adaptive framework based on large language models (LLMs) for the automated extraction and structuring of earthquake-related web content. The proposed approach integrates transformer-based schema generation, repository-guided schema matching, and an iterative refinement mechanism, enabling the system to dynamically adapt to heterogeneous document structures. A formal utility-based decision mechanism is introduced to optimize schema selection and reuse, while embedding-based similarity modeling facilitates efficient transfer of extraction patterns across structurally related webpages. The experimental evaluation was conducted on a heterogeneous benchmark dataset comprising multiple web domains with diverse structural characteristics. The results demonstrate that the proposed framework achieves a success rate of 85% across all evaluated models, with the best-performing configuration reaching an extraction accuracy of 96.5% and a final composite score of 84.26. Additional analysis reveals significant improvements in extraction completeness, reduction in false positives and false negatives, and effective reuse of a compact set of robust schemas. Error analysis indicates that the primary challenges are associated with noisy HTML structures and incorrect DOM (Document Object Model) element selection, rather than deficiencies in textual content. The findings confirm that combining lightweight transformer models with adaptive memory and schema reuse mechanisms enables the development of scalable, robust, and high-performance web extraction systems. The proposed approach is particularly suitable for real-time information retrieval in safety-critical domains, where timely and accurate data aggregation from heterogeneous sources is essential.</p>
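	<p>An illustrative sketch of repository-guided schema matching via embedding similarity (random vectors stand in for real page embeddings, and the threshold logic is only gestured at, not the authors' utility function):</p>
	<pre><code>import numpy as np

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    # Cosine similarity between two embedding vectors.
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

rng = np.random.default_rng(1)
repository = {f"schema_{i}": rng.normal(size=128) for i in range(5)}
new_page = rng.normal(size=128)

# Reuse the stored schema whose page embedding is closest to the new page;
# below some similarity threshold, a fresh schema would be generated instead.
best = max(repository, key=lambda k: cosine(repository[k], new_page))
print("best matching schema:", best)
</code></pre>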
	]]></content:encoded>

	<dc:title>A Self-Adaptive LLM-Based Framework for Automated Extraction and Structuring of Earthquake Information from Heterogeneous Web Sources</dc:title>
			<dc:creator>Assem Turarbek</dc:creator>
			<dc:creator>Diana Rakhimova</dc:creator>
			<dc:creator>Yeldos Adetbekov</dc:creator>
			<dc:creator>Azat Nurgali</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050294</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>294</prism:startingPage>
		<prism:doi>10.3390/computers15050294</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/294</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/293">

	<title>Computers, Vol. 15, Pages 293: Non-Standard Squat Posture Detection Method Using Human Skeleton</title>
	<link>https://www.mdpi.com/2073-431X/15/5/293</link>
	<description>Squats are essential for assessing lower limb strength. However, performing them incorrectly without professional guidance often leads to sports injuries. Currently, most detection methods rely heavily on deep neural networks and massive datasets. This approach brings several downsides. It involves high data labeling costs and heavy computing demands. It is also difficult to achieve low-latency feedback on mobile devices. Furthermore, these models often lack robustness when dealing with individual body differences. To tackle these issues, we propose a new real-time squat detection method. Our approach is built on prior rules and statistical models. Here is how it works. First, we use MediaPipe to track the body&amp;rsquo;s skeleton joints in real time from video feeds, calculating the hip and knee angles frame by frame. Next, we build a hip-knee coordination model using linear regression. This step helps us measure how these joints move together dynamically. Finally, we verify the squat depth using a geometry-based tolerance mechanism. This feature accounts for measurement noise and natural body variations, allowing us to accurately judge whether the overall posture is standard. We tested our approach on three different squat styles. The results show that our method catches improper forms quickly and efficiently in real time, achieving an accuracy of 90%.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 293: Non-Standard Squat Posture Detection Method Using Human Skeleton</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/293">doi: 10.3390/computers15050293</a></p>
	<p>Authors:
		Leiyue Yao
		Zhiqiang Dai
		Keyun Xiong
		</p>
	<p>Squats are essential for assessing lower limb strength. However, performing them incorrectly without professional guidance often leads to sports injuries. Currently, most detection methods rely heavily on deep neural networks and massive datasets. This approach brings several downsides. It involves high data labeling costs and heavy computing demands. It is also difficult to achieve low-latency feedback on mobile devices. Furthermore, these models often lack robustness when dealing with individual body differences. To tackle these issues, we propose a new real-time squat detection method. Our approach is built on prior rules and statistical models. Here is how it works. First, we use MediaPipe to track the body&rsquo;s skeleton joints in real time from video feeds, calculating the hip and knee angles frame by frame. Next, we build a hip-knee coordination model using linear regression. This step helps us measure how these joints move together dynamically. Finally, we verify the squat depth using a geometry-based tolerance mechanism. This feature accounts for measurement noise and natural body variations, allowing us to accurately judge whether the overall posture is standard. We tested our approach on three different squat styles. The results show that our method catches improper forms quickly and efficiently in real time, achieving an accuracy of 90%.</p>
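	<p>The frame-by-frame angle computation reduces to the angle at a joint between two keypoint vectors; a minimal sketch (the keypoint coordinates below are invented for illustration):</p>
	<pre><code>import numpy as np

def joint_angle(a, b, c) -> float:
    """Angle in degrees at point b, formed by segments b->a and b->c."""
    a, b, c = map(np.asarray, (a, b, c))
    v1, v2 = a - b, c - b
    cos = v1 @ v2 / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return float(np.degrees(np.arccos(np.clip(cos, -1.0, 1.0))))

# Normalized image coordinates for hip, knee, and ankle keypoints.
hip, knee, ankle = (0.5, 0.4), (0.5, 0.6), (0.55, 0.8)
print(f"knee angle: {joint_angle(hip, knee, ankle):.1f} degrees")
</code></pre>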
	]]></content:encoded>

	<dc:title>Non-Standard Squat Posture Detection Method Using Human Skeleton</dc:title>
			<dc:creator>Leiyue Yao</dc:creator>
			<dc:creator>Zhiqiang Dai</dc:creator>
			<dc:creator>Keyun Xiong</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050293</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>293</prism:startingPage>
		<prism:doi>10.3390/computers15050293</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/293</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/292">

	<title>Computers, Vol. 15, Pages 292: A Language for Modeling Declarative Knowledge Bases in the Context of Model-Driven Engineering</title>
	<link>https://www.mdpi.com/2073-431X/15/5/292</link>
	<description>End-user development (EUD) and model-driven engineering (MDE) are particularly valuable for building classical intelligent systems that rely on declarative knowledge bases. In these knowledge bases, the key dependencies of the domain can be described in the form of logical rules. The general-purpose modeling language used in MDE, specifically UML, enables modeling of static data structures and the dynamics of object behavior; however, it does not natively support the modeling of logical rules. In this paper, we propose a rule visual modeling language inspired by UML&amp;mdash;Rule Visual Modeling Language (RVML)&amp;mdash;which expands the capabilities of MDE in terms of using domain-specific visual languages. This approach substantially supports end-users in constructing declarative knowledge bases. We present the formal semantics, visual syntax, and features of RVML, along with two industrial case studies. We empirically evaluate the effectiveness of RVML in development compared to other graphic notations used for modeling logical rules. Our evaluation demonstrates that RVML provides superior expressiveness and better preservation of semantic integrity.</description>
	<pubDate>2026-05-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 292: A Language for Modeling Declarative Knowledge Bases in the Context of Model-Driven Engineering</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/292">doi: 10.3390/computers15050292</a></p>
	<p>Authors:
		Aleksandr Yurin
		Nikita Dorodnykh
		</p>
	<p>End-user development (EUD) and model-driven engineering (MDE) are particularly valuable for building classical intelligent systems that rely on declarative knowledge bases. In these knowledge bases, the key dependencies of the domain can be described in the form of logical rules. The general-purpose modeling language used in MDE, specifically UML, enables modeling of static data structures and the dynamics of object behavior; however, it does not natively support the modeling of logical rules. In this paper, we propose a rule visual modeling language inspired by UML&mdash;Rule Visual Modeling Language (RVML)&mdash;which expands the capabilities of MDE in terms of using domain-specific visual languages. This approach substantially supports end-users in constructing declarative knowledge bases. We present the formal semantics, visual syntax, and features of RVML, along with two industrial case studies. We empirically evaluate the effectiveness of RVML in development compared to other graphic notations used for modeling logical rules. Our evaluation demonstrates that RVML provides superior expressiveness and better preservation of semantic integrity.</p>
	]]></content:encoded>

	<dc:title>A Language for Modeling Declarative Knowledge Bases in the Context of Model-Driven Engineering</dc:title>
			<dc:creator>Aleksandr Yurin</dc:creator>
			<dc:creator>Nikita Dorodnykh</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050292</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-04</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-04</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>292</prism:startingPage>
		<prism:doi>10.3390/computers15050292</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/292</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/291">

	<title>Computers, Vol. 15, Pages 291: FetalNet 1.0: TOPSIS-Guided Ensemble Learning with Genetic Feature Selection and SHAP Explainability for Fetal Health Classification from Cardiotocography</title>
	<link>https://www.mdpi.com/2073-431X/15/5/291</link>
	<description>Fetal health assessment is a crucial aspect of prenatal care, aimed at the early detection of potential complications to ensure optimal outcomes for both mother and child. Traditional methods, such as the visual analysis of cardiotocography (CTG) data by healthcare professionals, are valuable but often subjective and time-consuming. This work investigates the application of machine learning techniques, with a focus on ensemble learning, to enhance the accuracy and efficiency of fetal health classification based on CTG data. A Genetic Algorithm (GA) is employed for optimal feature selection, identifying the most discriminative subset of CTG attributes to improve model performance and reduce computational complexity. We employ a combination of advanced machine learning models, including AdaBoost, Gaussian Na&amp;iuml;ve Bayes, Decision Tree, k-nearest neighbors (KNN), and Logistic Regression. The top two models were selected based on comprehensive performance metrics using the TOPSIS (Technique for Order Preference by Similarity to Ideal Solution) method. These models were then integrated through ensemble learning approaches, such as stacking, Particle Swarm Optimization (PSO) weighted averaging, and soft voting, to improve prediction reliability. Our proposed stacking ensemble model achieves a remarkable accuracy of 97.9%, demonstrating its potential as a robust, data-driven tool for fetal health monitoring and the early identification of at-risk pregnancies. The results indicate that machine learning can effectively complement traditional fetal health assessment methods by providing an objective framework to support clinical decision-making.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 291: FetalNet 1.0: TOPSIS-Guided Ensemble Learning with Genetic Feature Selection and SHAP Explainability for Fetal Health Classification from Cardiotocography</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/291">doi: 10.3390/computers15050291</a></p>
	<p>Authors:
		 Shweta
		Neha Gupta
		Meenakshi Gupta
		Massimo Donelli
		Yogita Arora
		Achin Jain
		</p>
	<p>Fetal health assessment is a crucial aspect of prenatal care, aimed at the early detection of potential complications to ensure optimal outcomes for both mother and child. Traditional methods, such as the visual analysis of cardiotocography (CTG) data by healthcare professionals, are valuable but often subjective and time-consuming. This work investigates the application of machine learning techniques, with a focus on ensemble learning, to enhance the accuracy and efficiency of fetal health classification based on CTG data. A Genetic Algorithm (GA) is employed for optimal feature selection, identifying the most discriminative subset of CTG attributes to improve model performance and reduce computational complexity. We employ a combination of advanced machine learning models, including AdaBoost, Gaussian Na&iuml;ve Bayes, Decision Tree, k-nearest neighbors (KNN), and Logistic Regression. The top two models were selected based on comprehensive performance metrics using the TOPSIS (Technique for Order Preference by Similarity to Ideal Solution) method. These models were then integrated through ensemble learning approaches, such as stacking, Particle Swarm Optimization (PSO) weighted averaging, and soft voting, to improve prediction reliability. Our proposed stacking ensemble model achieves a remarkable accuracy of 97.9%, demonstrating its potential as a robust, data-driven tool for fetal health monitoring and the early identification of at-risk pregnancies. The results indicate that machine learning can effectively complement traditional fetal health assessment methods by providing an objective framework to support clinical decision-making.</p>
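	<p>For readers unfamiliar with TOPSIS, here is a compact sketch of the ranking step (the scores and weights are invented for illustration, and all metrics are treated as benefit criteria):</p>
	<pre><code>import numpy as np

scores = np.array([[0.95, 0.93, 0.94],   # rows: candidate models
                   [0.97, 0.96, 0.95],   # cols: e.g. accuracy, F1, AUC
                   [0.90, 0.88, 0.91]])
weights = np.array([0.4, 0.3, 0.3])

norm = scores / np.linalg.norm(scores, axis=0)  # vector normalization
v = norm * weights                              # weighted decision matrix
ideal, anti = v.max(axis=0), v.min(axis=0)      # ideal / anti-ideal points
d_pos = np.linalg.norm(v - ideal, axis=1)
d_neg = np.linalg.norm(v - anti, axis=1)
closeness = d_neg / (d_pos + d_neg)             # 1 = ideal, 0 = anti-ideal
print("ranking (best first):", np.argsort(-closeness))
</code></pre>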
	]]></content:encoded>

	<dc:title>FetalNet 1.0: TOPSIS-Guided Ensemble Learning with Genetic Feature Selection and SHAP Explainability for Fetal Health Classification from Cardiotocography</dc:title>
			<dc:creator> Shweta</dc:creator>
			<dc:creator>Neha Gupta</dc:creator>
			<dc:creator>Meenakshi Gupta</dc:creator>
			<dc:creator>Massimo Donelli</dc:creator>
			<dc:creator>Yogita Arora</dc:creator>
			<dc:creator>Achin Jain</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050291</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>291</prism:startingPage>
		<prism:doi>10.3390/computers15050291</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/291</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/290">

	<title>Computers, Vol. 15, Pages 290: Performance Evaluation of Post-Quantum Digital Signature in QPSK- and 16QAM-Based WDM Communication Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/5/290</link>
	<description>The integration of post-quantum digital signature (PQDS) algorithms into coherent wavelength-division multiplexing (WDM) optical networks introduces a non-negligible cryptographic overhead that fundamentally alters physical-layer performance characteristics. Unlike conventional studies that treat security and transmission independently, this work provides a cross-layer evaluation of PQDS-induced payload expansion and its direct impact on coherent optical system behavior under realistic, DSP-aligned conditions. A structured and reproducible evaluation framework is proposed to systematically analyze this interaction across multiple transmission scenarios, ranging from a single-channel QPSK baseline to a 16-channel WDM system employing both QPSK and 16QAM modulation formats. Key system parameters&amp;mdash;including launch power, local oscillator power, bit rate, and fiber length&amp;mdash;are jointly optimized, while performance is rigorously assessed in terms of bit error rate (BER), Q-factor, and maximum transmission reach. The results demonstrate a clear performance degradation trend driven by both spectral efficiency scaling and cryptographic payload expansion. The single-channel QPSK system achieves a maximum reach of 203 km, which decreases to 194 km in the 16-channel WDM QPSK configuration due to inter-channel interference and nonlinear effects. In contrast, the 16-channel WDM 16QAM system exhibits a significantly reduced reach of 103 km, reflecting its heightened sensitivity to noise, chromatic dispersion, and fiber nonlinearities. Furthermore, increased payload size associated with PQDS schemes is shown to exacerbate transmission impairments by extending frame duration and intensifying inter-channel interactions. These findings identify PQDS-induced overhead as a critical system-level constraint that directly governs transmission efficiency, scalability, and performance limits. The study highlights the necessity of cross-layer co-design strategies, where cryptographic mechanisms and physical-layer parameters are jointly optimized to enable efficient, reliable, and quantum-safe coherent optical communication systems.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 290: Performance Evaluation of Post-Quantum Digital Signature in QPSK- and 16QAM-Based WDM Communication Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/290">doi: 10.3390/computers15050290</a></p>
	<p>Authors:
		Duaa J. Khalaf
		Arwa A. Moosa
		Tayseer S. Atia
		</p>
	<p>The integration of post-quantum digital signature (PQDS) algorithms into coherent wavelength-division multiplexing (WDM) optical networks introduces a non-negligible cryptographic overhead that fundamentally alters physical-layer performance characteristics. Unlike conventional studies that treat security and transmission independently, this work provides a cross-layer evaluation of PQDS-induced payload expansion and its direct impact on coherent optical system behavior under realistic, DSP-aligned conditions. A structured and reproducible evaluation framework is proposed to systematically analyze this interaction across multiple transmission scenarios, ranging from a single-channel QPSK baseline to a 16-channel WDM system employing both QPSK and 16QAM modulation formats. Key system parameters&mdash;including launch power, local oscillator power, bit rate, and fiber length&mdash;are jointly optimized, while performance is rigorously assessed in terms of bit error rate (BER), Q-factor, and maximum transmission reach. The results demonstrate a clear performance degradation trend driven by both spectral efficiency scaling and cryptographic payload expansion. The single-channel QPSK system achieves a maximum reach of 203 km, which decreases to 194 km in the 16-channel WDM QPSK configuration due to inter-channel interference and nonlinear effects. In contrast, the 16-channel WDM 16QAM system exhibits a significantly reduced reach of 103 km, reflecting its heightened sensitivity to noise, chromatic dispersion, and fiber nonlinearities. Furthermore, increased payload size associated with PQDS schemes is shown to exacerbate transmission impairments by extending frame duration and intensifying inter-channel interactions. These findings identify PQDS-induced overhead as a critical system-level constraint that directly governs transmission efficiency, scalability, and performance limits. The study highlights the necessity of cross-layer co-design strategies, where cryptographic mechanisms and physical-layer parameters are jointly optimized to enable efficient, reliable, and quantum-safe coherent optical communication systems.</p>
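	<p>For context, the BER and Q-factor metrics quoted above are linked, under the usual Gaussian-noise assumption, by BER = 0.5 erfc(Q / sqrt(2)); a quick numerical check:</p>
	<pre><code>from math import erfc, sqrt, log10

for q in (6.0, 7.0):
    ber = 0.5 * erfc(q / sqrt(2))
    print(f"Q = {q:.1f}  ->  BER = {ber:.2e}  (log10 = {log10(ber):.2f})")
# Q of about 6 corresponds to BER near 1e-9; Q of about 7 to near 1e-12.
</code></pre>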
	]]></content:encoded>

	<dc:title>Performance Evaluation of Post-Quantum Digital Signature in QPSK- and 16QAM-Based WDM Communication Systems</dc:title>
			<dc:creator>Duaa J. Khalaf</dc:creator>
			<dc:creator>Arwa A. Moosa</dc:creator>
			<dc:creator>Tayseer S. Atia</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050290</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>290</prism:startingPage>
		<prism:doi>10.3390/computers15050290</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/290</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/289">

	<title>Computers, Vol. 15, Pages 289: Generative AI for Education in Infrastructure Systems: Lessons from a BIM-Based Rule-Checking</title>
	<link>https://www.mdpi.com/2073-431X/15/5/289</link>
	<description>This study investigates the educational potential of Large Language Models (LLMs) for automating rule-checking tasks in Building Information Modeling (BIM) instruction. A quasi-experimental classroom implementation was conducted over two consecutive semesters with 55 graduate students in a Construction Management program. In Fall 2024, students were taught manual rule-checking techniques, whereas in Spring 2025, students received additional instruction in LLM-based prompting and Python code generation for automated compliance checking. A mixed-methods evaluation was conducted using surveys, NASA Task Load Index ratings, assignment-based learning outcomes, and structured interviews. Compared with the manual-only cohort, the LLM-assisted cohort reported significantly lower mental, temporal, and frustration demands, as well as higher perceived time efficiency and overall effectiveness. The LLM-assisted group also achieved significantly higher performance in violation detection and method accuracy, although no significant differences were observed in code interpretation or reflective analysis. Qualitative findings further revealed both the efficiency benefits of AI-assisted automation and persistent challenges related to prompt refinement, debugging, and output validation. These findings suggest that LLMs can enhance BIM instruction when paired with structured pedagogical scaffolding to support critical oversight and novice learners.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 289: Generative AI for Education in Infrastructure Systems: Lessons from a BIM-Based Rule-Checking</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/289">doi: 10.3390/computers15050289</a></p>
	<p>Authors:
		Islem Sahraoui
		Kinam Kim
		Lu Gao
		Zia Ud Din
		Ahmed Senouci
		</p>
	<p>This study investigates the educational potential of Large Language Models (LLMs) for automating rule-checking tasks in Building Information Modeling (BIM) instruction. A quasi-experimental classroom implementation was conducted over two consecutive semesters with 55 graduate students in a Construction Management program. In Fall 2024, students were taught manual rule-checking techniques, whereas in Spring 2025, students received additional instruction in LLM-based prompting and Python code generation for automated compliance checking. A mixed-methods evaluation was conducted using surveys, NASA Task Load Index ratings, assignment-based learning outcomes, and structured interviews. Compared with the manual-only cohort, the LLM-assisted cohort reported significantly lower mental, temporal, and frustration demands, as well as higher perceived time efficiency and overall effectiveness. The LLM-assisted group also achieved significantly higher performance in violation detection and method accuracy, although no significant differences were observed in code interpretation or reflective analysis. Qualitative findings further revealed both the efficiency benefits of AI-assisted automation and persistent challenges related to prompt refinement, debugging, and output validation. These findings suggest that LLMs can enhance BIM instruction when paired with structured pedagogical scaffolding to support critical oversight and novice learners.</p>
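	<p>An example of the kind of compliance check students generated with LLM assistance (the rule, threshold, and element data below are hypothetical, not taken from the course materials):</p>
	<pre><code># Flag doors narrower than an assumed minimum clear width (illustrative rule).
MIN_DOOR_WIDTH_MM = 915  # hypothetical accessibility requirement

doors = [{"id": "D-101", "width_mm": 900},
         {"id": "D-102", "width_mm": 950}]

violations = [d["id"] for d in doors if MIN_DOOR_WIDTH_MM > d["width_mm"]]
print("violations:", violations)  # -> ['D-101']
</code></pre>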
	]]></content:encoded>

	<dc:title>Generative AI for Education in Infrastructure Systems: Lessons from a BIM-Based Rule-Checking</dc:title>
			<dc:creator>Islem Sahraoui</dc:creator>
			<dc:creator>Kinam Kim</dc:creator>
			<dc:creator>Lu Gao</dc:creator>
			<dc:creator>Zia Ud Din</dc:creator>
			<dc:creator>Ahmed Senouci</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050289</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>289</prism:startingPage>
		<prism:doi>10.3390/computers15050289</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/289</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/288">

	<title>Computers, Vol. 15, Pages 288: AIGU-DPFL: Adaptive Differentially Private Federated Learning with Importance-Based Gradient Updates</title>
	<link>https://www.mdpi.com/2073-431X/15/5/288</link>
	<description>Federated learning, a decentralized machine learning framework, allows multiple participants to jointly train models while keeping their raw data local and unshared. Nevertheless, during the exchange of model updates, the communicated information can still introduce privacy vulnerabilities and potentially result in the exposure of user data. Over the past few years, differential privacy methods have been broadly incorporated into federated learning frameworks to strengthen the protection of sensitive data. However, the noise required to satisfy differential privacy guarantees often causes significant degradation in model performance. Prior studies have typically employed a fixed noise-injection strategy following gradient clipping. Although such methods provide privacy protection, they overlook the varying importance of different gradient dimensions, resulting in noise being injected into unimportant or redundant parameters, thereby causing unnecessary performance loss. To address these limitations, we propose an adaptive differentially private federated learning scheme with importance-based gradient updates (AIGU-DPFL). Specifically, we focus on coordinates with high information content and introduce an adaptive noise injection mechanism, which perturbs gradient updates to satisfy differential privacy guarantees while dynamically controlling noise intensity, thereby achieving sparse and noise-effective gradient updates. AIGU-DPFL markedly enhances the training effectiveness of federated learning models. Comprehensive evaluations conducted on real-world datasets indicate that the proposed method achieves superior performance compared to existing differentially private federated learning techniques.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 288: AIGU-DPFL: Adaptive Differentially Private Federated Learning with Importance-Based Gradient Updates</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/288">doi: 10.3390/computers15050288</a></p>
	<p>Authors:
		Fangfang Shan
		Zhuo Chen
		Yifan Mao
		Yuhang Liu
		Lulu Fan
		Yanlong Lu
		</p>
	<p>Federated learning, a decentralized machine learning framework, allows multiple participants to jointly train models while keeping their raw data local and unshared. Nevertheless, during the exchange of model updates, the communicated information can still introduce privacy vulnerabilities and potentially result in the exposure of user data. Over the past few years, differential privacy methods have been broadly incorporated into federated learning frameworks to strengthen the protection of sensitive data. However, the noise required to satisfy differential privacy guarantees often causes significant degradation in model performance. Prior studies have typically employed a fixed noise-injection strategy following gradient clipping. Although such methods provide privacy protection, they overlook the varying importance of different gradient dimensions, resulting in noise being injected into unimportant or redundant parameters, thereby causing unnecessary performance loss. To address these limitations, we propose an adaptive differentially private federated learning scheme with importance-based gradient updates (AIGU-DPFL). Specifically, we focus on coordinates with high information content and introduce an adaptive noise injection mechanism, which perturbs gradient updates to satisfy differential privacy guarantees while dynamically controlling noise intensity, thereby achieving sparse and noise-effective gradient updates. AIGU-DPFL markedly enhances the training effectiveness of federated learning models. Comprehensive evaluations conducted on real-world datasets indicate that the proposed method achieves superior performance compared to existing differentially private federated learning techniques.</p>
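	<p>A toy sketch of the importance-based idea, clipping the gradient and adding Gaussian noise only to the highest-magnitude coordinates (an illustrative reading only; it is not the authors' exact mechanism and omits their privacy accounting):</p>
	<pre><code>import numpy as np

def private_sparse_update(grad, k, clip, sigma, rng):
    # Clip the gradient to a fixed L2 norm bound.
    g = grad * min(1.0, clip / (np.linalg.norm(grad) + 1e-12))
    # Treat magnitude as a stand-in for coordinate importance.
    keep = np.argsort(-np.abs(g))[:k]
    # Perturb only the retained coordinates; the rest stay zero (sparse).
    update = np.zeros_like(g)
    update[keep] = g[keep] + rng.normal(0.0, sigma * clip, size=k)
    return update

rng = np.random.default_rng(0)
grad = rng.normal(size=10)
print(private_sparse_update(grad, k=3, clip=1.0, sigma=0.5, rng=rng))
</code></pre>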
	]]></content:encoded>

	<dc:title>AIGU-DPFL: Adaptive Differentially Private Federated Learning with Importance-Based Gradient Updates</dc:title>
			<dc:creator>Fangfang Shan</dc:creator>
			<dc:creator>Zhuo Chen</dc:creator>
			<dc:creator>Yifan Mao</dc:creator>
			<dc:creator>Yuhang Liu</dc:creator>
			<dc:creator>Lulu Fan</dc:creator>
			<dc:creator>Yanlong Lu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050288</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>288</prism:startingPage>
		<prism:doi>10.3390/computers15050288</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/288</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/287">

	<title>Computers, Vol. 15, Pages 287: On-Device Transformer Architectures for Speech Evaluation in Neurodegenerative Disease Detection</title>
	<link>https://www.mdpi.com/2073-431X/15/5/287</link>
	<description>Speech alterations are early markers of neurodegenerative diseases. Transformer-based speech models such as Whisper have advanced automated speech assessment, but most systems rely on cloud-based computation, raising privacy concerns. On-device processing could offer a scalable and privacy-preserving alternative. This research&amp;rsquo;s objective was to evaluate whether a fully on-device speech analysis pipeline can achieve competitive accuracy in detecting Alzheimer&amp;rsquo;s disease and to quantify the contributions of acoustic, linguistic, and embedding features. Therefore, we developed an iOS application running all components, including acoustic analysis, two transformer-based speech-to-text modules (WhisperBase and quantized CrisperWhisper), linguistic feature extraction, and embedding generation, directly on the device. Using the ADReSS Challenge 2020 dataset (N = 156), we trained classical machine-learning classifiers across 20 configurations and evaluated them via stratified 10-fold cross-validation. Area under the receiver operating characteristic curve (AUC), accuracy, precision, recall, and F1 scores were used as performance metrics. An ablation study examined the relevance of each feature group. The best-performing setup (Random Forest with CrisperWhisper transcription and Apple embeddings) achieved an accuracy of 85.4% and an AUC of 0.85. Performance was 5&amp;ndash;7% below benchmark models relying on manual transcripts or server-based processing. Embedding features provided the strongest individual contribution, but the highest accuracy required combining acoustic, linguistic, and embedding information. A fully on-device pipeline for Alzheimer&amp;rsquo;s disease detection from speech is feasible and achieves competitive accuracy while maintaining strict data privacy. These findings highlight the potential of on-device transformer architectures for scalable, privacy-preserving digital screening. Future work should validate the approach in larger and more diverse cohorts.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 287: On-Device Transformer Architectures for Speech Evaluation in Neurodegenerative Disease Detection</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/287">doi: 10.3390/computers15050287</a></p>
	<p>Authors:
		Lara Marie Reimer
		Leonard Pries
		Florian Schweizer
		Leon Nissen
		Stephan M. Jonas
		</p>
	<p>Speech alterations are early markers of neurodegenerative diseases. Transformer-based speech models such as Whisper have advanced automated speech assessment, but most systems rely on cloud-based computation, raising privacy concerns. On-device processing could offer a scalable and privacy-preserving alternative. This research&rsquo;s objective was to evaluate whether a fully on-device speech analysis pipeline can achieve competitive accuracy in detecting Alzheimer&rsquo;s disease and to quantify the contributions of acoustic, linguistic, and embedding features. Therefore, we developed an iOS application running all components, including acoustic analysis, two transformer-based speech-to-text modules (WhisperBase and quantized CrisperWhisper), linguistic feature extraction, and embedding generation, directly on the device. Using the ADReSS Challenge 2020 dataset (N = 156), we trained classical machine-learning classifiers across 20 configurations and evaluated them via stratified 10-fold cross-validation. Area under the receiver operating characteristic curve (AUC), accuracy, precision, recall, and F1 scores were used as performance metrics. An ablation study examined the relevance of each feature group. The best-performing setup (Random Forest with CrisperWhisper transcription and Apple embeddings) achieved an accuracy of 85.4% and an AUC of 0.85. Performance was 5&ndash;7% below benchmark models relying on manual transcripts or server-based processing. Embedding features provided the strongest individual contribution, but the highest accuracy required combining acoustic, linguistic, and embedding information. A fully on-device pipeline for Alzheimer&rsquo;s disease detection from speech is feasible and achieves competitive accuracy while maintaining strict data privacy. These findings highlight the potential of on-device transformer architectures for scalable, privacy-preserving digital screening. Future work should validate the approach in larger and more diverse cohorts.</p>
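	<p>In scikit-learn terms, the evaluation protocol (stratified 10-fold cross-validation of a Random Forest with AUC scoring) looks roughly like the sketch below, with random features and labels standing in for the real acoustic, linguistic, and embedding features:</p>
	<pre><code>import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(156, 40))    # N = 156 recordings, dummy features
y = rng.integers(0, 2, size=156)  # dummy AD-vs-control labels

cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(RandomForestClassifier(random_state=0),
                         X, y, cv=cv, scoring="roc_auc")
print(f"mean AUC = {scores.mean():.3f} +/- {scores.std():.3f}")
</code></pre>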
	]]></content:encoded>

	<dc:title>On-Device Transformer Architectures for Speech Evaluation in Neurodegenerative Disease Detection</dc:title>
			<dc:creator>Lara Marie Reimer</dc:creator>
			<dc:creator>Leonard Pries</dc:creator>
			<dc:creator>Florian Schweizer</dc:creator>
			<dc:creator>Leon Nissen</dc:creator>
			<dc:creator>Stephan M. Jonas</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050287</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>287</prism:startingPage>
		<prism:doi>10.3390/computers15050287</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/287</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/286">

	<title>Computers, Vol. 15, Pages 286: Multi-Centre Liver Tumour Classification via Federated Learning: Investigating Data Heterogeneity, Transfer Learning, and Model Efficiency</title>
	<link>https://www.mdpi.com/2073-431X/15/5/286</link>
	<description>This paper investigates federated multi-centre liver tumour classification from contrast-enhanced CT under realistic data heterogeneity and domain shift. To address the practical constraint that medical data are often siloed across institutions, we develop a FedProx-based federated learning pipeline that enables collaborative training without exchanging raw patient data. Using the LiTS dataset as the training domain, we construct a slice-level binary classification task based on voxel-level annotations, while rigorously assessing out-of-distribution generalisation on an external held-out dataset, 3D-IRCADb. We conduct comprehensive experiments across multiple backbone architectures, including ResNet-50, EfficientNet-B3, ViT-B/16, and MobileNetV3-Small, comparing FedProx and FedAvg under three heterogeneity intensities (IID, mild non-IID, and severe non-IID). Furthermore, we evaluate transfer learning strategies, ranging from frozen backbones to partial fine-tuning of the last stage, and perform ablations on the proximal coefficient &amp;mu; and local epochs E to characterise optimisation behaviour. Our results show that FedProx is generally comparable to FedAvg, with slightly more stable behaviour in some heterogeneous settings. We also observe a clear validation-to-external gap, indicating that external-domain robustness remains challenging and requires cautious interpretation for deployment. ImageNet pretraining yields consistent gains, particularly for data-sparse clients, while partial fine-tuning enhances adaptation to CT-specific features. Finally, MobileNetV3-Small offers a favourable performance&amp;ndash;efficiency trade-off by reducing communication payload and computation cost, supporting practical deployment on resource-constrained clinical edge devices.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 286: Multi-Centre Liver Tumour Classification via Federated Learning: Investigating Data Heterogeneity, Transfer Learning, and Model Efficiency</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/286">doi: 10.3390/computers15050286</a></p>
	<p>Authors:
		Degang Zhu
		Shiqi Wei
		Xinming Zhang
		</p>
	<p>This paper investigates federated multi-centre liver tumour classification from contrast-enhanced CT under realistic data heterogeneity and domain shift. To address the practical constraint that medical data are often siloed across institutions, we develop a FedProx-based federated learning pipeline that enables collaborative training without exchanging raw patient data. Using the LiTS dataset as the training domain, we construct a slice-level binary classification task based on voxel-level annotations, while rigorously assessing out-of-distribution generalisation on an external held-out dataset, 3D-IRCADb. We conduct comprehensive experiments across multiple backbone architectures, including ResNet-50, EfficientNet-B3, ViT-B/16, and MobileNetV3-Small, comparing FedProx and FedAvg under three heterogeneity intensities (IID, mild non-IID, and severe non-IID). Furthermore, we evaluate transfer learning strategies, ranging from frozen backbones to partial fine-tuning of the last stage, and perform ablations on the proximal coefficient &mu; and local epochs E to characterise optimisation behaviour. Our results show that FedProx is generally comparable to FedAvg, with slightly more stable behaviour in some heterogeneous settings. We also observe a clear validation-to-external gap, indicating that external-domain robustness remains challenging and requires cautious interpretation for deployment. ImageNet pretraining yields consistent gains, particularly for data-sparse clients, while partial fine-tuning enhances adaptation to CT-specific features. Finally, MobileNetV3-Small offers a favourable performance&ndash;efficiency trade-off by reducing communication payload and computation cost, supporting practical deployment on resource-constrained clinical edge devices.</p>
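	<p>A minimal sketch of the FedProx idea named above, under its usual formulation: each client's local task loss is augmented with a proximal term (mu/2) * ||w - w_global||^2 that anchors local weights to the global model under non-IID data. The model, optimizer, and mu value here are illustrative assumptions, not the paper's configuration.</p>
	<pre><code>
# Sketch of a FedProx local update: the task loss is augmented with a
# proximal term (mu/2) * ||w - w_global||^2 that keeps client weights
# close to the global model under heterogeneous (non-IID) data.
import torch

def fedprox_local_step(model, global_params, batch, loss_fn, opt, mu=0.01):
    x, y = batch
    opt.zero_grad()
    loss = loss_fn(model(x), y)
    prox = 0.0
    for w, w_g in zip(model.parameters(), global_params):
        prox = prox + torch.sum((w - w_g.detach()) ** 2)
    (loss + 0.5 * mu * prox).backward()
    opt.step()
</code></pre>
	<p>With mu = 0 this reduces to a plain FedAvg local step, which matches the FedProx-versus-FedAvg comparison reported above.</p>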
	]]></content:encoded>

	<dc:title>Multi-Centre Liver Tumour Classification via Federated Learning: Investigating Data Heterogeneity, Transfer Learning, and Model Efficiency</dc:title>
			<dc:creator>Degang Zhu</dc:creator>
			<dc:creator>Shiqi Wei</dc:creator>
			<dc:creator>Xinming Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050286</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>286</prism:startingPage>
		<prism:doi>10.3390/computers15050286</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/286</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/285">

	<title>Computers, Vol. 15, Pages 285: A Rigorous Comparative Study of Supervised Machine Learning Techniques for Network Anomaly Detection: Empirical Insights from the UNSW-NB15 Dataset</title>
	<link>https://www.mdpi.com/2073-431X/15/5/285</link>
	<description>The increasing complexity of modern network infrastructures has intensified the need for reliable and efficient intrusion detection systems. While advanced deep learning approaches have demonstrated strong performance, their high computational cost and limited interpretability restrict their practical deployment in real-time environments. This study presents a systematic empirical evaluation of four supervised machine learning models&amp;mdash;Decision Tree, Random Forest, Support Vector Machine (SVM), and XGBoost&amp;mdash;for network anomaly detection using the UNSW-NB15 dataset. To ensure methodological rigor, a structured preprocessing pipeline and a five-fold stratified cross-validation framework were employed. Model performance was assessed using multiple evaluation metrics, including accuracy, precision, recall, F1-score, and area under the ROC curve (AUC). In addition, a feature importance analysis was conducted to identify the most influential network traffic attributes contributing to anomaly detection. The results show that ensemble-based methods outperform individual classifiers, with XGBoost achieving the best overall performance (accuracy = 0.97, AUC = 0.98) along with high stability across validation folds. The analysis further reveals that a subset of flow-based and temporal features&amp;mdash;such as sttl, sload, and dload&amp;mdash;plays a critical role in distinguishing between normal and malicious traffic. This study provides a rigorous, interpretable, and reproducible benchmarking framework for supervised machine learning in network anomaly detection. The findings provide practical insights for developing efficient and scalable intrusion detection systems suitable for real-world deployment.</description>
	<pubDate>2026-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 285: A Rigorous Comparative Study of Supervised Machine Learning Techniques for Network Anomaly Detection: Empirical Insights from the UNSW-NB15 Dataset</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/285">doi: 10.3390/computers15050285</a></p>
	<p>Authors:
		Nouf Alkhater
		</p>
	<p>The increasing complexity of modern network infrastructures has intensified the need for reliable and efficient intrusion detection systems. While advanced deep learning approaches have demonstrated strong performance, their high computational cost and limited interpretability restrict their practical deployment in real-time environments. This study presents a systematic empirical evaluation of four supervised machine learning models&mdash;Decision Tree, Random Forest, Support Vector Machine (SVM), and XGBoost&mdash;for network anomaly detection using the UNSW-NB15 dataset. To ensure methodological rigor, a structured preprocessing pipeline and a five-fold stratified cross-validation framework were employed. Model performance was assessed using multiple evaluation metrics, including accuracy, precision, recall, F1-score, and area under the ROC curve (AUC). In addition, a feature importance analysis was conducted to identify the most influential network traffic attributes contributing to anomaly detection. The results show that ensemble-based methods outperform individual classifiers, with XGBoost achieving the best overall performance (accuracy = 0.97, AUC = 0.98) along with high stability across validation folds. The analysis further reveals that a subset of flow-based and temporal features&mdash;such as sttl, sload, and dload&mdash;plays a critical role in distinguishing between normal and malicious traffic. This study provides a rigorous, interpretable, and reproducible benchmarking framework for supervised machine learning in network anomaly detection. The findings provide practical insights for developing efficient and scalable intrusion detection systems suitable for real-world deployment.</p>
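	<p>As an illustrative sketch of the protocol above (five-fold stratified cross-validation of XGBoost, scored on accuracy, precision, recall, F1, and AUC), using synthetic stand-ins for the preprocessed UNSW-NB15 features:</p>
	<pre><code>
# Five-fold stratified CV of an XGBoost classifier, mirroring the
# reported protocol. X and y are synthetic stand-ins for UNSW-NB15.
import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_validate
from xgboost import XGBClassifier

rng = np.random.default_rng(42)
X = rng.normal(size=(1000, 40))    # placeholder flow-feature matrix
y = rng.integers(0, 2, size=1000)  # placeholder normal/attack labels

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
scores = cross_validate(
    XGBClassifier(eval_metric="logloss"), X, y, cv=cv,
    scoring=["accuracy", "precision", "recall", "f1", "roc_auc"])
print({k: v.mean() for k, v in scores.items() if k.startswith("test_")})
</code></pre>
	<p>Feature rankings of the kind behind the sttl/sload/dload finding would come from the fitted model's feature_importances_ attribute.</p>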
	]]></content:encoded>

	<dc:title>A Rigorous Comparative Study of Supervised Machine Learning Techniques for Network Anomaly Detection: Empirical Insights from the UNSW-NB15 Dataset</dc:title>
			<dc:creator>Nouf Alkhater</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050285</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-05-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-05-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>285</prism:startingPage>
		<prism:doi>10.3390/computers15050285</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/285</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/284">

	<title>Computers, Vol. 15, Pages 284: Immersive VR-MoCap for Creative Motion Design in Character Animation Training: A Classroom-Based Comparative Study</title>
	<link>https://www.mdpi.com/2073-431X/15/5/284</link>
	<description>Although motion capture has become integral to contemporary animation pipelines, university teaching still asks students to learn motion largely through screen-based keyframing. To address this gap, this classroom-based comparative study evaluated one structured motion-design lesson within an immersive MoCap-supported training module. Sixty-eight undergraduates in a computer animation course completed the same task in either a Keyframe condition (n = 33) or a VR-MoCap condition (n = 35), with instructional delivery mode as the only difference. Creative performance was assessed in originality, fluency, aesthetic quality, clarity, and a composite score. MANOVA revealed a significant multivariate effect of condition (Pillai&amp;rsquo;s trace = 0.454, F(4, 63) = 13.12, p &amp;lt; 0.001). Relative to keyframe instruction, VR-MoCap produced significantly higher originality, fluency, clarity, and composite performance, whereas aesthetic quality did not differ significantly. Supplementary group-interview responses further indicated that students experienced the immersive condition as more engaging, more intuitive, and better suited to immediate feedback and embodied movement exploration. Immersive VR-MoCap appears most useful in the early phases of motion design and is better understood as complementing, rather than replacing, conventional keyframe training.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 284: Immersive VR-MoCap for Creative Motion Design in Character Animation Training: A Classroom-Based Comparative Study</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/284">doi: 10.3390/computers15050284</a></p>
	<p>Authors:
		Xinyi Jiang
		Muying Luo
		Zainuddin Ibrahim
		Azlan Abdul Aziz
		Azhar Jamil
		</p>
	<p>Although motion capture has become integral to contemporary animation pipelines, university teaching still asks students to learn motion largely through screen-based keyframing. To address this gap, this classroom-based comparative study evaluated one structured motion-design lesson within an immersive MoCap-supported training module. Sixty-eight undergraduates in a computer animation course completed the same task in either a Keyframe condition (n = 33) or a VR-MoCap condition (n = 35), with instructional delivery mode as the only difference. Creative performance was assessed in originality, fluency, aesthetic quality, clarity, and a composite score. MANOVA revealed a significant multivariate effect of condition (Pillai&rsquo;s trace = 0.454, F(4, 63) = 13.12, p &lt; 0.001). Relative to keyframe instruction, VR-MoCap produced significantly higher originality, fluency, clarity, and composite performance, whereas aesthetic quality did not differ significantly. Supplementary group-interview responses further indicated that students experienced the immersive condition as more engaging, more intuitive, and better suited to immediate feedback and embodied movement exploration. Immersive VR-MoCap appears most useful in the early phases of motion design and is better understood as complementing, rather than replacing, conventional keyframe training.</p>
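	<p>The multivariate test reported above (Pillai's trace across the four creativity measures by condition) corresponds to a standard one-way MANOVA; a runnable sketch with statsmodels on synthetic stand-in scores (not the study's ratings):</p>
	<pre><code>
# MANOVA of four dependent measures by instructional condition; the
# printed table includes Pillai's trace. Data below are synthetic.
import numpy as np
import pandas as pd
from statsmodels.multivariate.manova import MANOVA

rng = np.random.default_rng(1)
df = pd.DataFrame({
    "originality": rng.normal(size=68),
    "fluency": rng.normal(size=68),
    "aesthetic": rng.normal(size=68),
    "clarity": rng.normal(size=68),
    "condition": ["keyframe"] * 33 + ["vr_mocap"] * 35,
})
m = MANOVA.from_formula(
    "originality + fluency + aesthetic + clarity ~ condition", data=df)
print(m.mv_test())  # Pillai's trace for the condition effect, among others
</code></pre>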
	]]></content:encoded>

	<dc:title>Immersive VR-MoCap for Creative Motion Design in Character Animation Training: A Classroom-Based Comparative Study</dc:title>
			<dc:creator>Xinyi Jiang</dc:creator>
			<dc:creator>Muying Luo</dc:creator>
			<dc:creator>Zainuddin Ibrahim</dc:creator>
			<dc:creator>Azlan Abdul Aziz</dc:creator>
			<dc:creator>Azhar Jamil</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050284</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>284</prism:startingPage>
		<prism:doi>10.3390/computers15050284</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/284</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/283">

	<title>Computers, Vol. 15, Pages 283: YOLOv12-WCIRS: An Improved YOLOv12-Based Framework for Small Intestinal Lesion Detection in WCE</title>
	<link>https://www.mdpi.com/2073-431X/15/5/283</link>
	<description>Accurate detection of small intestinal lesions in wireless capsule endoscopy (WCE) images remains challenging because lesions are often small, weakly contrasted, irregular in shape, and easily confused with complex mucosal backgrounds. To address these difficulties, this study proposes YOLOv12-WCIRS, a WCE-oriented improvement of YOLOv12 that jointly enhances local feature extraction, selective multi-scale fusion, background suppression, localization sensitivity, and scale-aware optimization. The proposed framework incorporates a Weighted Convolution (WConv) module, a Contextual Selection Fusion Module (CSFM), an Information Integration Attention Fusion (IIA_Fusion) module, a Receptive Field Attention-based detection head (RFAHeadDetect), and a Scale Dynamic Loss (SD Loss). Experiments on the SEE-AI dataset show that YOLOv12-WCIRS achieves 83.4% mAP@0.5 and 61.1% mAP@0.5:0.95, improving mAP@0.5 from 76.9% to 83.4% over the direct baseline YOLOv12 while maintaining competitive efficiency. Additional analyses, including cross-dataset validation on overlapping categories in Kvasir-Capsule, normal-frame false-alarm evaluation, false-positive/false-negative breakdown, and repeated-run statistical testing, further support the robustness and practical value of the proposed framework. These results indicate that YOLOv12-WCIRS provides an effective solution for automated lesion detection in WCE images and shows promise for computer-aided capsule endoscopy analysis.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 283: YOLOv12-WCIRS: An Improved YOLOv12-Based Framework for Small Intestinal Lesion Detection in WCE</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/283">doi: 10.3390/computers15050283</a></p>
	<p>Authors:
		Shiren Ye
		Liangjing Li
		Zetong Zhang
		Haipeng Ma
		</p>
	<p>Accurate detection of small intestinal lesions in wireless capsule endoscopy (WCE) images remains challenging because lesions are often small, weakly contrasted, irregular in shape, and easily confused with complex mucosal backgrounds. To address these difficulties, this study proposes YOLOv12-WCIRS, a WCE-oriented improvement of YOLOv12 that jointly enhances local feature extraction, selective multi-scale fusion, background suppression, localization sensitivity, and scale-aware optimization. The proposed framework incorporates a Weighted Convolution (WConv) module, a Contextual Selection Fusion Module (CSFM), an Information Integration Attention Fusion (IIA_Fusion) module, a Receptive Field Attention-based detection head (RFAHeadDetect), and a Scale Dynamic Loss (SD Loss). Experiments on the SEE-AI dataset show that YOLOv12-WCIRS achieves 83.4% mAP@0.5 and 61.1% mAP@0.5:0.95, improving mAP@0.5 from 76.9% to 83.4% over the direct baseline YOLOv12 while maintaining competitive efficiency. Additional analyses, including cross-dataset validation on overlapping categories in Kvasir-Capsule, normal-frame false-alarm evaluation, false-positive/false-negative breakdown, and repeated-run statistical testing, further support the robustness and practical value of the proposed framework. These results indicate that YOLOv12-WCIRS provides an effective solution for automated lesion detection in WCE images and shows promise for computer-aided capsule endoscopy analysis.</p>
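	<p>The mAP@0.5 and mAP@0.5:0.95 figures above rest on box IoU matching: a predicted box counts as a true positive when its IoU with a ground-truth box reaches the threshold. A minimal, generic IoU helper (not the paper's code):</p>
	<pre><code>
# Intersection-over-union for axis-aligned boxes in (x1, y1, x2, y2)
# form; at mAP@0.5 a detection matches ground truth when IoU reaches 0.5.
def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    iw, ih = max(0.0, ix2 - ix1), max(0.0, iy2 - iy1)
    inter = iw * ih
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

print(iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 = 0.1428...
</code></pre>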
	]]></content:encoded>

	<dc:title>YOLOv12-WCIRS: An Improved YOLOv12-Based Framework for Small Intestinal Lesion Detection in WCE</dc:title>
			<dc:creator>Shiren Ye</dc:creator>
			<dc:creator>Liangjing Li</dc:creator>
			<dc:creator>Zetong Zhang</dc:creator>
			<dc:creator>Haipeng Ma</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050283</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>283</prism:startingPage>
		<prism:doi>10.3390/computers15050283</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/283</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/282">

	<title>Computers, Vol. 15, Pages 282: Empirical Performance and Operational Analysis of Monolithic and Distributed Database Architectures in Kubernetes Environments</title>
	<link>https://www.mdpi.com/2073-431X/15/5/282</link>
	<description>This study presents a systematic empirical evaluation of monolithic and distributed database architectures deployed in Kubernetes environments. As containerized and cloud-native infrastructures become increasingly prevalent, understanding the performance implications of running stateful data systems under orchestration platforms has become critical. We evaluate five widely used database systems&amp;mdash;PostgreSQL, MySQL, MongoDB, Redis, and Cassandra&amp;mdash;using standardized workload generation frameworks, including pgbench, sysbench, YCSB, redis-benchmark, and cassandra-stress. Controlled experiments were conducted across varying concurrency levels and workload types to measure throughput, latency, and scalability in both single-node and distributed deployments. Redis achieves a maximum throughput of 4.2 million operations per second with sub-millisecond latency. In contrast, Cassandra delivers 214,743 distributed read operations per second at ONE consistency, approaching Redis&amp;rsquo;s non-pipelined baseline throughput (257,732&amp;ndash;262,467 ops/sec) within a Kubernetes cluster. The write throughput of Cassandra decreases by 45.2% when the consistency level is elevated to QUORUM, accompanied by an elevenfold increase in run-to-run variability (CV from 7.1% to 84.7%), indicating that the consistency level is the primary performance determinant in distributed systems. PostgreSQL experiences a 72% decrease in write throughput in Kubernetes (74,072 &amp;rarr; 20,805 TPS). In contrast, MySQL PXC anomalously attains a 37.3% increase in write throughput in Kubernetes compared to its monolithic deployment&amp;mdash;the sole reversal noted among the five systems. These findings underscore a critical trade-off between vertical efficiency and horizontal scalability, illustrating that hybrid database architecture can be an effective solution for contemporary cloud-native applications compared to either paradigm independently.</description>
	<pubDate>2026-04-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 282: Empirical Performance and Operational Analysis of Monolithic and Distributed Database Architectures in Kubernetes Environments</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/282">doi: 10.3390/computers15050282</a></p>
	<p>Authors:
		Jasmin Redžepagić
		Ana Kapulica
		Nikola Malešević
		Vedran Dakić
		</p>
	<p>This study presents a systematic empirical evaluation of monolithic and distributed database architectures deployed in Kubernetes environments. As containerized and cloud-native infrastructures become increasingly prevalent, understanding the performance implications of running stateful data systems under orchestration platforms has become critical. We evaluate five widely used database systems&mdash;PostgreSQL, MySQL, MongoDB, Redis, and Cassandra&mdash;using standardized workload generation frameworks, including pgbench, sysbench, YCSB, redis-benchmark, and cassandra-stress. Controlled experiments were conducted across varying concurrency levels and workload types to measure throughput, latency, and scalability in both single-node and distributed deployments. Redis achieves a maximum throughput of 4.2 million operations per second with sub-millisecond latency. In contrast, Cassandra delivers 214,743 distributed read operations per second at ONE consistency, approaching Redis&rsquo;s non-pipelined baseline throughput (257,732&ndash;262,467 ops/sec) within a Kubernetes cluster. The write throughput of Cassandra decreases by 45.2% when the consistency level is elevated to QUORUM, accompanied by an elevenfold increase in run-to-run variability (CV from 7.1% to 84.7%), indicating that the consistency level is the primary performance determinant in distributed systems. PostgreSQL experiences a 72% decrease in write throughput in Kubernetes (74,072 &rarr; 20,805 TPS). In contrast, MySQL PXC anomalously attains a 37.3% increase in write throughput in Kubernetes compared to its monolithic deployment&mdash;the sole reversal noted among the five systems. These findings underscore a critical trade-off between vertical efficiency and horizontal scalability, illustrating that hybrid database architecture can be an effective solution for contemporary cloud-native applications compared to either paradigm independently.</p>
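	<p>A sketch of how one of the workload generators named above can be driven programmatically; pgbench's -c, -j, and -T flags set client connections, worker threads, and run duration. The database name and the concurrency sweep are illustrative assumptions, not the study's exact setup.</p>
	<pre><code>
# Drive pgbench across concurrency levels and collect its output, which
# includes the measured TPS. Assumes pgbench is on PATH and a database
# named "bench" has been initialized (pgbench -i bench).
import subprocess

def run_pgbench(db="bench", clients=32, threads=4, seconds=60):
    result = subprocess.run(
        ["pgbench", "-c", str(clients), "-j", str(threads),
         "-T", str(seconds), db],
        capture_output=True, text=True, check=True)
    return result.stdout

for c in (8, 32, 128):  # sweep concurrency as in the experiments
    print(run_pgbench(clients=c))
</code></pre>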
	]]></content:encoded>

	<dc:title>Empirical Performance and Operational Analysis of Monolithic and Distributed Database Architectures in Kubernetes Environments</dc:title>
			<dc:creator>Jasmin Redžepagić</dc:creator>
			<dc:creator>Ana Kapulica</dc:creator>
			<dc:creator>Nikola Malešević</dc:creator>
			<dc:creator>Vedran Dakić</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050282</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-29</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-29</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>282</prism:startingPage>
		<prism:doi>10.3390/computers15050282</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/282</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/281">

	<title>Computers, Vol. 15, Pages 281: LACE-Net: A Swin Transformer with Local Frequency-Domain Energy and Adaptive Contrast Enhancement for Fine-Grained Land Cover Classification</title>
	<link>https://www.mdpi.com/2073-431X/15/5/281</link>
	<description>The Swin Transformer exhibits limitations in fine-grained land use and land cover (LULC) classification, particularly in capturing high-frequency texture details and representing low-contrast regions. To address these issues, we propose a novel network model, termed LACE-Net, which integrates local frequency-domain energy and adaptive contrast enhancement. Built upon the Swin Transformer backbone, the model introduces an innovative Local Frequency-Domain Energy-Adaptive Contrast Enhancement Multi-Scale Attention (LACE). This block consists of parallel branches for frequency-domain perception and contrast enhancement, which effectively combine texture and illumination physical priors. In addition, a texture-adaptive momentum adjustment mechanism is incorporated to refine the spatial enhancement attention weights dynamically. Consequently, LACE-Net greatly strengthens the modeling and representation of high-frequency details and complex spatial structural features. Experiments are performed on a self-constructed Guangxi regional dataset (denoted as GLC-30) and the publicly available remote sensing scene classification benchmark dataset NWPU-RESISC45. The results show that LACE-Net achieves a Top-1 accuracy (Top-1 Acc) of 96.48% and a macro-averaged F1 score (mF1) of 93.13%. These results outperform current mainstream vision models, particularly in mitigating the spectral confusion issue of &amp;ldquo;same spectrum, different objects.&amp;rdquo; The model exhibits superior fine-grained classification performance and robust generalization across datasets.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 281: LACE-Net: A Swin Transformer with Local Frequency-Domain Energy and Adaptive Contrast Enhancement for Fine-Grained Land Cover Classification</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/281">doi: 10.3390/computers15050281</a></p>
	<p>Authors:
		Yongmei Tan
		Gong Chen
		Yan Huang
		Hengzhou Ye
		Jincheng Tang
		</p>
	<p>The Swin Transformer exhibits limitations in fine-grained land use and land cover (LULC) classification, particularly in capturing high-frequency texture details and representing low-contrast regions. To address these issues, we propose a novel network model, termed LACE-Net, which integrates local frequency-domain energy and adaptive contrast enhancement. Built upon the Swin Transformer backbone, the model introduces an innovative Local Frequency-Domain Energy-Adaptive Contrast Enhancement Multi-Scale Attention (LACE). This block consists of parallel branches for frequency-domain perception and contrast enhancement, which effectively combine texture and illumination physical priors. In addition, a texture-adaptive momentum adjustment mechanism is incorporated to refine the spatial enhancement attention weights dynamically. Consequently, LACE-Net greatly strengthens the modeling and representation of high-frequency details and complex spatial structural features. Experiments are performed on a self-constructed Guangxi regional dataset (denoted as GLC-30) and the publicly available remote sensing scene classification benchmark dataset NWPU-RESISC45. The results show that LACE-Net achieves a Top-1 accuracy (Top-1 Acc) of 96.48% and a macro-averaged F1 score (mF1) of 93.13%. These results outperform current mainstream vision models, particularly in mitigating the spectral confusion issue of &ldquo;same spectrum, different objects.&rdquo; The model exhibits superior fine-grained classification performance and robust generalization across datasets.</p>
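	<p>The abstract does not give the LACE block's internals, but the "local frequency-domain energy" cue it builds on can be illustrated generically: per-window FFT energy outside the DC term serves as a proxy for high-frequency texture. A purely hypothetical PyTorch sketch (the window size is an assumption):</p>
	<pre><code>
# Per-patch non-DC FFT energy as a high-frequency texture proxy.
import torch

def local_freq_energy(img, win=8):
    # img: (B, C, H, W), split into non-overlapping win-by-win patches
    patches = img.unfold(2, win, win).unfold(3, win, win)
    spec = torch.fft.rfft2(patches)               # 2D FFT per patch
    energy = spec.abs().pow(2).sum(dim=(-2, -1))  # spectral energy
    dc = spec[..., 0, 0].abs().pow(2)             # DC (mean) component
    return energy - dc                            # high-frequency residue

x = torch.randn(1, 3, 64, 64)
print(local_freq_energy(x).shape)  # torch.Size([1, 3, 8, 8])
</code></pre>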
	]]></content:encoded>

	<dc:title>LACE-Net: A Swin Transformer with Local Frequency-Domain Energy and Adaptive Contrast Enhancement for Fine-Grained Land Cover Classification</dc:title>
			<dc:creator>Yongmei Tan</dc:creator>
			<dc:creator>Gong Chen</dc:creator>
			<dc:creator>Yan Huang</dc:creator>
			<dc:creator>Hengzhou Ye</dc:creator>
			<dc:creator>Jincheng Tang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050281</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>281</prism:startingPage>
		<prism:doi>10.3390/computers15050281</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/281</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/280">

	<title>Computers, Vol. 15, Pages 280: xjb: Fast Float to String Algorithm</title>
	<link>https://www.mdpi.com/2073-431X/15/5/280</link>
	<description>Efficiently and accurately converting floating-point numbers to decimal strings remains a fundamental challenge in numerical computation, data serialization, and human&amp;ndash;computer interaction. While modern algorithms such as Ry&amp;#363;, Dragonbox, and Schubfach rigorously satisfy the Steele&amp;ndash;White criteria for correctness and minimal output length, their performance is frequently constrained by branch mispredictions, high-precision multiplication overhead, and suboptimal utilization of instruction-level parallelism. This paper introduces xjb, a novel floating-point&amp;ndash;string conversion algorithm derived from Schubfach that systematically overcomes these bottlenecks. By restructuring the core computation to reduce instruction dependencies, adopting branchless decision logic, and exploiting SIMD instruction sets for decimal-to-ASCII formatting, xjb delivers state-of-the-art throughput across diverse hardware platforms. The algorithm requires only a single 64-by-128-bit multiplication for IEEE 754 binary64 conversions and a single 64-by-64-bit multiplication for binary32, drastically decreasing arithmetic complexity. Extensive benchmarking on AMD R7-7840H and Apple M1/M5 processors demonstrates that xjb consistently outperforms leading contemporary implementations. Notably, on the Apple M5, xjb achieves speedups of approximately 20% and 136% for binary64 and binary32 conversions, respectively, when compared to the highly optimized zmij library. The algorithm is fully compliant with the Steele&amp;ndash;White principle; exhaustive validation over the entire binary32 space and extensive random testing across the binary64 range confirm both its theoretical soundness and practical robustness.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 280: xjb: Fast Float to String Algorithm</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/280">doi: 10.3390/computers15050280</a></p>
	<p>Authors:
		Junbo Xiang
		Tiejun Wang
		</p>
	<p>Efficiently and accurately converting floating-point numbers to decimal strings remains a fundamental challenge in numerical computation, data serialization, and human&ndash;computer interaction. While modern algorithms such as Ry&#363;, Dragonbox, and Schubfach rigorously satisfy the Steele&ndash;White criteria for correctness and minimal output length, their performance is frequently constrained by branch mispredictions, high-precision multiplication overhead, and suboptimal utilization of instruction-level parallelism. This paper introduces xjb, a novel floating-point&ndash;string conversion algorithm derived from Schubfach that systematically overcomes these bottlenecks. By restructuring the core computation to reduce instruction dependencies, adopting branchless decision logic, and exploiting SIMD instruction sets for decimal-to-ASCII formatting, xjb delivers state-of-the-art throughput across diverse hardware platforms. The algorithm requires only a single 64-by-128-bit multiplication for IEEE 754 binary64 conversions and a single 64-by-64-bit multiplication for binary32, drastically decreasing arithmetic complexity. Extensive benchmarking on AMD R7-7840H and Apple M1/M5 processors demonstrates that xjb consistently outperforms leading contemporary implementations. Notably, on the Apple M5, xjb achieves speedups of approximately 20% and 136% for binary64 and binary32 conversions, respectively, when compared to the highly optimized zmij library. The algorithm is fully compliant with the Steele&ndash;White principle; exhaustive validation over the entire binary32 space and extensive random testing across the binary64 range confirm both its theoretical soundness and practical robustness.</p>
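	<p>The validation style described above can be sketched in Python (a test harness, not xjb itself): Python's repr already emits shortest round-tripping decimals consistent with the Steele&ndash;White criterion, so bit patterns can be decoded with struct and checked for exact round-trips.</p>
	<pre><code>
# Round-trip check over random binary64 bit patterns; the same loop over
# all 2**32 patterns (struct formats "=I"/"=f") covers binary32
# exhaustively, mirroring the validation described above.
import random
import struct

def roundtrips_f64(n=100_000, seed=0):
    rnd = random.Random(seed)
    for _ in range(n):
        bits = rnd.getrandbits(64)
        (x,) = struct.unpack("=d", struct.pack("=Q", bits))
        if x == x:                      # skip NaN payloads (NaN != NaN)
            assert float(repr(x)) == x  # shortest decimal round-trips
    return True

print(roundtrips_f64())
</code></pre>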
	]]></content:encoded>

	<dc:title>xjb: Fast Float to String Algorithm</dc:title>
			<dc:creator>Junbo Xiang</dc:creator>
			<dc:creator>Tiejun Wang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050280</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>280</prism:startingPage>
		<prism:doi>10.3390/computers15050280</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/280</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/279">

	<title>Computers, Vol. 15, Pages 279: Reason2Decide-C: Adaptive Cycle-Consistent Training for Clinical Rationales</title>
	<link>https://www.mdpi.com/2073-431X/15/5/279</link>
	<description>Large Language Models (LLMs) used for clinical decision support must not only make accurate predictions but also generate rationales that are consistent with, and sufficient for, those predictions. Building on Reason2Decide, a two-stage rationale-driven multi-task framework, we propose Reason2Decide-C (R2D-C, where C denotes cycle consistency), which augments Reason2Decide&amp;rsquo;s stage 2 training with confidence-adaptive scheduled sampling and cycle-consistent rationale-to-label training. In stage 1, we pretrain our model on rationale generation. In stage 2, we jointly train on label prediction and rationale generation, gradually replacing gold labels with model-predicted labels based on confidence. Simultaneously, we feed the rationale logits back into the model to recover the label, thus enforcing explanation sufficiency. We evaluate R2D-C on one proprietary triage dataset, as well as public biomedical QA and reasoning datasets. Across model sizes, R2D-C substantially improves rationale&amp;ndash;prediction consistency (where stage 1 and stage 2 predictions agree) and sufficiency (where the rationale alone recovers the ground-truth label) over other baselines while matching or modestly improving predictive performance (F1); in several settings R2D-C surpasses 40&amp;times; larger foundation models. Ablations confirm that the full combination is optimal, maximizing alignment and LLM-as-a-Judge rationale quality. These results demonstrate that confidence-adaptive scheduled sampling and cycle-consistent rationale-to-label training substantially enhance explanation alignment without sacrificing accuracy.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 279: Reason2Decide-C: Adaptive Cycle-Consistent Training for Clinical Rationales</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/279">doi: 10.3390/computers15050279</a></p>
	<p>Authors:
		H M Quamran Hasan
		Housam Khalifa Bashier Babiker
		Mi-Young Kim
		Randy Goebel
		</p>
	<p>Large Language Models (LLMs) used for clinical decision support must not only make accurate predictions but also generate rationales that are consistent with, and sufficient for, those predictions. Building on Reason2Decide, a two-stage rationale-driven multi-task framework, we propose Reason2Decide-C (R2D-C, where C denotes cycle consistency), which augments Reason2Decide&rsquo;s stage 2 training with confidence-adaptive scheduled sampling and cycle-consistent rationale-to-label training. In stage 1, we pretrain our model on rationale generation. In stage 2, we jointly train on label prediction and rationale generation, gradually replacing gold labels with model-predicted labels based on confidence. Simultaneously, we feed the rationale logits back into the model to recover the label, thus enforcing explanation sufficiency. We evaluate R2D-C on one proprietary triage dataset, as well as public biomedical QA and reasoning datasets. Across model sizes, R2D-C substantially improves rationale&ndash;prediction consistency (where stage 1 and stage 2 predictions agree) and sufficiency (where the rationale alone recovers the ground-truth label) over other baselines while matching or modestly improving predictive performance (F1); in several settings R2D-C surpasses 40&times; larger foundation models. Ablations confirm that the full combination is optimal, maximizing alignment and LLM-as-a-Judge rationale quality. These results demonstrate that confidence-adaptive scheduled sampling and cycle-consistent rationale-to-label training substantially enhance explanation alignment without sacrificing accuracy.</p>
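	<p>A minimal sketch of the confidence-adaptive scheduled sampling described above: during stage-2 training, the gold label is swapped for the model's own prediction once predictive confidence clears a threshold. The threshold and tensors are illustrative assumptions, not the paper's values.</p>
	<pre><code>
# Replace gold labels with model predictions where confidence is high.
import torch

def scheduled_labels(logits, gold, threshold=0.9):
    probs = torch.softmax(logits, dim=-1)
    conf, pred = probs.max(dim=-1)
    use_pred = conf > threshold        # confident: trust the model
    return torch.where(use_pred, pred, gold)

logits = torch.tensor([[4.0, 0.1, 0.2], [0.3, 0.4, 0.2]])
gold = torch.tensor([2, 1])
print(scheduled_labels(logits, gold))  # tensor([0, 1])
</code></pre>
	<p>The cycle-consistent step described above would add a second loss on recovering the label from the rationale logits; that part is omitted here.</p>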
	]]></content:encoded>

	<dc:title>Reason2Decide-C: Adaptive Cycle-Consistent Training for Clinical Rationales</dc:title>
			<dc:creator>H M Quamran Hasan</dc:creator>
			<dc:creator>Housam Khalifa Bashier Babiker</dc:creator>
			<dc:creator>Mi-Young Kim</dc:creator>
			<dc:creator>Randy Goebel</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050279</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>279</prism:startingPage>
		<prism:doi>10.3390/computers15050279</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/279</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/278">

	<title>Computers, Vol. 15, Pages 278: A Validated Design Guideline for Mobile Applications Grounded in the Participation of Deaf Users for Accessible Development</title>
	<link>https://www.mdpi.com/2073-431X/15/5/278</link>
	<description>Mobile devices are widely used, yet accessibility for people with disabilities remains a critical challenge. Deaf users who rely primarily on sign language (SL) frequently encounter barriers when interacting with applications not designed for their communication needs. This study proposes a design guide for developing mobile applications tailored to sign language users. The guide was developed through the active participation of three groups: Deaf individuals, usability and user experience (UX) experts, and mobile application developers. Based on their contributions, thirteen design guidelines were defined, addressing sign language integration, visual feedback, navigation, content presentation, and interface design. The guidelines were validated through usability and UX evaluations conducted with the three participant groups. A mobile application was subsequently developed following the proposed guidelines to assess their practical applicability. The evaluation results indicate that the guide effectively supports the development of more accessible and usable mobile applications for Deaf users. Incorporating sign language-centered design principles significantly improves usability and user experience for individuals with hearing disabilities, contributing to more inclusive mobile application development.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 278: A Validated Design Guideline for Mobile Applications Grounded in the Participation of Deaf Users for Accessible Development</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/278">doi: 10.3390/computers15050278</a></p>
	<p>Authors:
		Andrés Eduardo Fuentes-Cortázar
		José Rafael Rojano-Cáceres
		</p>
	<p>Mobile devices are widely used, yet accessibility for people with disabilities remains a critical challenge. Deaf users who rely primarily on sign language (SL) frequently encounter barriers when interacting with applications not designed for their communication needs. This study proposes a design guide for developing mobile applications tailored to sign language users. The guide was developed through the active participation of three groups: Deaf individuals, usability and user experience (UX) experts, and mobile application developers. Based on their contributions, thirteen design guidelines were defined, addressing sign language integration, visual feedback, navigation, content presentation, and interface design. The guidelines were validated through usability and UX evaluations conducted with the three participant groups. A mobile application was subsequently developed following the proposed guidelines to assess their practical applicability. The evaluation results indicate that the guide effectively supports the development of more accessible and usable mobile applications for Deaf users. Incorporating sign language-centered design principles significantly improves usability and user experience for individuals with hearing disabilities, contributing to more inclusive mobile application development.</p>
	]]></content:encoded>

	<dc:title>A Validated Design Guideline for Mobile Applications Grounded in the Participation of Deaf Users for Accessible Development</dc:title>
			<dc:creator>Andrés Eduardo Fuentes-Cortázar</dc:creator>
			<dc:creator>José Rafael Rojano-Cáceres</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050278</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>278</prism:startingPage>
		<prism:doi>10.3390/computers15050278</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/278</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/277">

	<title>Computers, Vol. 15, Pages 277: Cognitive Grounding for Perspective Integration in Multi-LLM Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/5/277</link>
	<description>This paper investigates whether structured collaboration between multiple large language models (LLMs), each assigned a distinct cognitive role grounded in psychological theory, produces benefits beyond simple answer aggregation. We propose the Parallel Synthesis architecture, in which three cognitively specialized roles, Analyzer (hierarchical decomposition), Creative (divergent thinking), and Critic (critical evaluation), process each task independently and in parallel, and a Synthesizer integrates their outputs into a final response. To evaluate collaborative reasoning, we introduce the Emergent Reasoning Score (ERS), a composite metric that separates perspective integration (Synthesis Effectiveness) from novel concept generation (Emergent Value). Experiments on the AI2 Reasoning Challenge (ARC-Challenge; 1172 questions) and the Massive Multitask Language Understanding benchmark (MMLU; 1531 questions) show two consistent findings. First, the architecture achieves high Synthesis Effectiveness (SE=0.711&amp;ndash;0.744), indicating reliable integration of all three cognitive perspectives. Second, Emergent Value remains low (EV=0.096&amp;ndash;0.112), indicating that synthesis primarily recombines existing concepts rather than generating substantial novel content. A Majority Voting baseline achieves comparable or slightly higher answer accuracy than the Synthesizer on both benchmarks, showing that the architecture&amp;rsquo;s main contribution lies not in answer selection but in producing integrated reasoning traces that draw on multiple perspectives. These findings suggest that the practical value of cognitively grounded multi-agent architectures lies in reliable perspective integration, while ERS provides a reusable framework for distinguishing integration from genuinely novel reasoning in multi-agent LLM systems. The empirical results reported here constitute a pilot validation of the proposed framework on closed-form benchmarks, intended to establish a proof of concept and motivate larger-scale evaluation.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 277: Cognitive Grounding for Perspective Integration in Multi-LLM Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/277">doi: 10.3390/computers15050277</a></p>
	<p>Authors:
		Lev Sukherman
		Yetunde Longe-Folajimi
		Marina Konkol
		</p>
	<p>This paper investigates whether structured collaboration between multiple large language models (LLMs), each assigned a distinct cognitive role grounded in psychological theory, produces benefits beyond simple answer aggregation. We propose the Parallel Synthesis architecture, in which three cognitively specialized roles, Analyzer (hierarchical decomposition), Creative (divergent thinking), and Critic (critical evaluation), process each task independently and in parallel, and a Synthesizer integrates their outputs into a final response. To evaluate collaborative reasoning, we introduce the Emergent Reasoning Score (ERS), a composite metric that separates perspective integration (Synthesis Effectiveness) from novel concept generation (Emergent Value). Experiments on the AI2 Reasoning Challenge (ARC-Challenge; 1172 questions) and the Massive Multitask Language Understanding benchmark (MMLU; 1531 questions) show two consistent findings. First, the architecture achieves high Synthesis Effectiveness (SE=0.711&ndash;0.744), indicating reliable integration of all three cognitive perspectives. Second, Emergent Value remains low (EV=0.096&ndash;0.112), indicating that synthesis primarily recombines existing concepts rather than generating substantial novel content. A Majority Voting baseline achieves comparable or slightly higher answer accuracy than the Synthesizer on both benchmarks, showing that the architecture&rsquo;s main contribution lies not in answer selection but in producing integrated reasoning traces that draw on multiple perspectives. These findings suggest that the practical value of cognitively grounded multi-agent architectures lies in reliable perspective integration, while ERS provides a reusable framework for distinguishing integration from genuinely novel reasoning in multi-agent LLM systems. The empirical results reported here constitute a pilot validation of the proposed framework on closed-form benchmarks, intended to establish a proof of concept and motivate larger-scale evaluation.</p>
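	<p>The control flow described above can be sketched generically: three role-conditioned calls run independently in parallel, and a fourth call synthesizes their drafts. The call_llm function below is a hypothetical placeholder for any LLM client, not a real API.</p>
	<pre><code>
# Parallel role-conditioned drafting followed by a synthesis call.
from concurrent.futures import ThreadPoolExecutor

ROLES = {
    "analyzer": "Decompose the problem hierarchically, then answer.",
    "creative": "Explore divergent, unconventional answers.",
    "critic": "Critically evaluate candidate answers and their pitfalls.",
}

def call_llm(system_prompt, task):  # hypothetical stand-in client
    return f"[{system_prompt.split()[0]}] draft answer to: {task}"

def parallel_synthesis(task):
    with ThreadPoolExecutor(max_workers=3) as pool:
        drafts = list(pool.map(lambda r: call_llm(ROLES[r], task), ROLES))
    prompt = "Integrate the three perspectives into one final answer."
    return call_llm(prompt, task + "\n" + "\n---\n".join(drafts))

print(parallel_synthesis("Why does ice float on water?"))
</code></pre>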
	]]></content:encoded>

	<dc:title>Cognitive Grounding for Perspective Integration in Multi-LLM Systems</dc:title>
			<dc:creator>Lev Sukherman</dc:creator>
			<dc:creator>Yetunde Longe-Folajimi</dc:creator>
			<dc:creator>Marina Konkol</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050277</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>277</prism:startingPage>
		<prism:doi>10.3390/computers15050277</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/277</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/276">

	<title>Computers, Vol. 15, Pages 276: Monitoring of Customer Segment Dynamics Using Clustering and Event-Based Alerts</title>
	<link>https://www.mdpi.com/2073-431X/15/5/276</link>
	<description>Continuous customer activity generated by modern digital platforms drives the evolution of behavioral segments over time. Traditional customer segmentation methods typically rely on periodic batch analysis of historical data, producing static snapshots that may quickly become outdated and fail to capture emerging behavioral patterns. This paper presents a monitoring-oriented framework for detecting customer segment evolution and generating timely notifications about meaningful structural changes in the customer population. The proposed system continuously ingests user activity events, incrementally updates customer profiles, and periodically recomputes behavioral segments using fixed-k KMeans clustering over standardized recency, frequency, and monetary (RFM) features. To improve robustness and interpretability, the framework incorporates adaptive event scoring, stability-aware segment validation, drift-aware centroid matching, and persistence-based filtering of transient changes. These mechanisms reduce noisy alerts caused by repeated clustering updates while preserving meaningful signals about evolving customer behavior. The framework is evaluated on the Online Retail II and Instacart datasets under streaming simulation conditions. Experimental results show that the proposed approach maintains stable clustering structures, identifies persistent segment changes, and uncovers economically meaningful customer groups. Compared with static segmentation and periodic clustering baselines, the framework improves clustering quality while enabling continuous monitoring of segment evolution. Overall, the results suggest that adaptive monitoring can extend traditional customer segmentation into a practical continuous analytics process for moderate-scale dynamic environments.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 276: Monitoring of Customer Segment Dynamics Using Clustering and Event-Based Alerts</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/276">doi: 10.3390/computers15050276</a></p>
	<p>Authors:
		Stavroula Chatzinikolaou
		Giannis Vassiliou
		Efstratia Vasileiou
		Sotirios Batsakis
		Nikos Papadakis
		</p>
	<p>Continuous customer activity generated by modern digital platforms drives the evolution of behavioral segments over time. Traditional customer segmentation methods typically rely on periodic batch analysis of historical data, producing static snapshots that may quickly become outdated and fail to capture emerging behavioral patterns. This paper presents a monitoring-oriented framework for detecting customer segment evolution and generating timely notifications about meaningful structural changes in the customer population. The proposed system continuously ingests user activity events, incrementally updates customer profiles, and periodically recomputes behavioral segments using fixed-k KMeans clustering over standardized recency, frequency, and monetary (RFM) features. To improve robustness and interpretability, the framework incorporates adaptive event scoring, stability-aware segment validation, drift-aware centroid matching, and persistence-based filtering of transient changes. These mechanisms reduce noisy alerts caused by repeated clustering updates while preserving meaningful signals about evolving customer behavior. The framework is evaluated on the Online Retail II and Instacart datasets under streaming simulation conditions. Experimental results show that the proposed approach maintains stable clustering structures, identifies persistent segment changes, and uncovers economically meaningful customer groups. Compared with static segmentation and periodic clustering baselines, the framework improves clustering quality while enabling continuous monitoring of segment evolution. Overall, the results suggest that adaptive monitoring can extend traditional customer segmentation into a practical continuous analytics process for moderate-scale dynamic environments.</p>
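	<p>The periodic re-clustering step described above amounts to fixed-k KMeans over standardized RFM features; a runnable sketch with scikit-learn on a synthetic RFM table (in the framework these profiles are updated incrementally from activity events):</p>
	<pre><code>
# Fixed-k KMeans over standardized recency/frequency/monetary features.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(7)
rfm = np.column_stack([
    rng.exponential(30.0, 500),  # recency: days since last activity
    rng.poisson(5, 500),         # frequency: number of orders
    rng.gamma(2.0, 50.0, 500),   # monetary: total spend
])
X = StandardScaler().fit_transform(rfm)
km = KMeans(n_clusters=4, n_init=10, random_state=7).fit(X)
print(np.bincount(km.labels_))  # segment sizes
print(km.cluster_centers_)      # centroids, reusable for drift matching
</code></pre>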
	]]></content:encoded>

	<dc:title>Monitoring of Customer Segment Dynamics Using Clustering and Event-Based Alerts</dc:title>
			<dc:creator>Stavroula Chatzinikolaou</dc:creator>
			<dc:creator>Giannis Vassiliou</dc:creator>
			<dc:creator>Efstratia Vasileiou</dc:creator>
			<dc:creator>Sotirios Batsakis</dc:creator>
			<dc:creator>Nikos Papadakis</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050276</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>276</prism:startingPage>
		<prism:doi>10.3390/computers15050276</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/276</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/275">

	<title>Computers, Vol. 15, Pages 275: The Missing Layer in Modern IT: Governance of Commitments, Not Just Compute and Data</title>
	<link>https://www.mdpi.com/2073-431X/15/5/275</link>
	<description>Contemporary enterprise IT operations are largely implemented on Shannon&amp;ndash;Turing computing models in which programs execute read&amp;ndash;compute&amp;ndash;write cycles over data structures, while governance&amp;mdash;fault handling, configuration control, auditability, continuity, and accounting&amp;mdash;is applied externally through infrastructure platforms, observability stacks, and human operational processes. This separation scales analytical throughput but accumulates what we term coherence debt: locally expedient operational commitments whose provenance and revisability degrade over time until exposed by failures, security incidents, regulatory demands, or architectural transitions. This paper examines the evolution of operational computing models that integrate computation with regulation at two distinct levels. First, Distributed Intelligent Managed Elements (DIME) extend the classical Turing cycle toward a supervised execution loop&amp;mdash;read&amp;ndash;check-with-oracle&amp;ndash;compute&amp;ndash;write&amp;mdash;by incorporating signaling overlays and FCAPS (Fault, Configuration, Accounting, Performance, and Security) supervision into computation in progress. Second, the Autopoietic Management and Orchestration System (AMOS), grounded in the General Theory of Information, the Burgin&amp;ndash;Mikkilineni Thesis, and Deutsch&amp;rsquo;s epistemic framework, fully decouples process executors from governance by treating any Turing-equivalent engine as a replaceable execution substrate while elevating knowledge structures&amp;mdash;encoded as local and global Digital Genomes&amp;mdash;to first-class operational state within a governed knowledge network. Using a distributed microservice transaction testbed, we demonstrate how this approach operationalizes topology-as-data, a capability-oriented control plane, decoupled application-layer FCAPS independent of infrastructure management, and policy-selectable consistency/availability semantics. Our results show that the principal benefit of AMOS is not circumventing theoretical constraints such as the Consistency, Availability, and Partition tolerance (CAP) theorem, but governing their trade-offs as explicit, auditable commitments with defined convergence pathways and controlled return to a coherent system state, thereby reducing coherence debt and improving operational reliability in distributed AI-enabled enterprise systems.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 275: The Missing Layer in Modern IT: Governance of Commitments, Not Just Compute and Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/275">doi: 10.3390/computers15050275</a></p>
	<p>Authors:
		Rao Mikkilineni
		William Patrick Kelly
		</p>
	<p>Contemporary enterprise IT operations are largely implemented on Shannon&ndash;Turing computing models in which programs execute read&ndash;compute&ndash;write cycles over data structures, while governance&mdash;fault handling, configuration control, auditability, continuity, and accounting&mdash;is applied externally through infrastructure platforms, observability stacks, and human operational processes. This separation scales analytical throughput but accumulates what we term coherence debt: locally expedient operational commitments whose provenance and revisability degrade over time until exposed by failures, security incidents, regulatory demands, or architectural transitions. This paper examines the evolution of operational computing models that integrate computation with regulation at two distinct levels. First, Distributed Intelligent Managed Elements (DIME) extend the classical Turing cycle toward a supervised execution loop&mdash;read&ndash;check-with-oracle&ndash;compute&ndash;write&mdash;by incorporating signaling overlays and FCAPS (Fault, Configuration, Accounting, Performance, and Security) supervision into computation in progress. Second, the Autopoietic Management and Orchestration System (AMOS), grounded in the General Theory of Information, the Burgin&ndash;Mikkilineni Thesis, and Deutsch&rsquo;s epistemic framework, fully decouples process executors from governance by treating any Turing-equivalent engine as a replaceable execution substrate while elevating knowledge structures&mdash;encoded as local and global Digital Genomes&mdash;to first-class operational state within a governed knowledge network. Using a distributed microservice transaction testbed, we demonstrate how this approach operationalizes topology-as-data, a capability-oriented control plane, decoupled application-layer FCAPS independent of infrastructure management, and policy-selectable consistency/availability semantics. Our results show that the principal benefit of AMOS is not circumventing theoretical constraints such as the Consistency, Availability, and Partition tolerance (CAP) theorem, but governing their trade-offs as explicit, auditable commitments with defined convergence pathways and controlled return to a coherent system state, thereby reducing coherence debt and improving operational reliability in distributed AI-enabled enterprise systems.</p>
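	<p>A purely hypothetical sketch of the supervised execution loop named above (read, check-with-oracle, compute, write): a policy oracle vets each step and supplies a recovery path before computation proceeds. All names and the toy policy are illustrative, not the AMOS implementation.</p>
	<pre><code>
# Supervised read/check-with-oracle/compute/write loop with an
# FCAPS-style policy oracle that can intercept individual steps.
def supervised_loop(read, oracle, compute, write, state):
    while True:
        item = read(state)
        if item is None:
            return state
        verdict = oracle(state, item)          # policy check before compute
        if not verdict["allow"]:
            state = verdict["recover"](state)  # controlled recovery path
            continue
        state = write(state, compute(item))

todo = iter([1, 2, -3, 4])
result = supervised_loop(
    read=lambda s: next(todo, None),
    oracle=lambda s, x: {"allow": x > 0, "recover": lambda s: s},
    compute=lambda x: x * x,
    write=lambda s, v: {"sum": s["sum"] + v},
    state={"sum": 0})
print(result)  # {'sum': 21}; the -3 step was intercepted by the oracle
</code></pre>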
	]]></content:encoded>

	<dc:title>The Missing Layer in Modern IT: Governance of Commitments, Not Just Compute and Data</dc:title>
			<dc:creator>Rao Mikkilineni</dc:creator>
			<dc:creator>William Patrick Kelly</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050275</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>275</prism:startingPage>
		<prism:doi>10.3390/computers15050275</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/275</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/274">

	<title>Computers, Vol. 15, Pages 274: Case Studies on the Logical Structure of the Algorithms Tabu Search and Threshold Accepting for Generating Solutions in Searching and Solving the Bin-Packing Problem</title>
	<link>https://www.mdpi.com/2073-431X/15/5/274</link>
	<description>The logical structure of approximation algorithms has been identified by the scientific community as comprising four principal parts: tuning parameters, generating initial solutions, generating neighbor solutions, and stopping algorithm execution. A review of the literature specifically for the Threshold Accepting (TA) and Tabu Search (TS) algorithms indicates that, in most cases, design choices are made in one or several of these logical parts, often implicitly guided by expert knowledge to improve algorithm performance. However, these design choices, particularly in the selection of initialization and neighborhood strategies, are rarely analyzed in a systematic and reproducible manner. A formal experimental framework is presented to systematically analyze logical structure design choices, which are typically based on empirical expertise, by isolating and evaluating the combined effects of methodologies in the logical parts of initialization and neighborhood, under controlled conditions, for the TA and TS algorithms solving the one-dimensional Bin Packing Problem (BPP). A total of 324 benchmark instances were used to assess multiple algorithmic variants. Performance was evaluated in terms of solution quality and computational effort, supported by graphical analysis and statistical methods, including Wilcoxon signed-rank tests, effect size measures, bootstrap-based confidence intervals, and linear regression. The experimental results consistently show that a simpler internal logical structure of the TA and TS algorithms, specifically a probability-guided initialization combined with a single neighborhood operator, can achieve a better balance between solution quality and computational effort than more complex alternatives on general BPP instances.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 274: Case Studies on the Logical Structure of the Algorithms Tabu Search and Threshold Accepting for Generating Solutions in Searching and Solving the Bin-Packing Problem</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/274">doi: 10.3390/computers15050274</a></p>
	<p>Authors:
		Vanesa Landero-Nájera
		Joaquín Pérez-Ortega
		Laura Cruz-Reyes
		Claudia Guadalupe Gómez-Santillán
		Nelva N. Almanza-Ortega
		Carlos Rodríguez-Orta
		Carlos Andrés Collazos-Morales
		</p>
	<p>The logical structure of approximation algorithms has been identified by the scientific community as comprising four principal parts: tuning parameters, generating initial solutions, generating neighbor solutions, and stopping algorithm execution. A review of the literature specifically for the Threshold Accepting (TA) and Tabu Search (TS) algorithms indicates that, in most cases, design choices are made in one or several of these logical parts, often implicitly guided by expert knowledge to improve algorithm performance. However, these design choices, particularly in the selection of initialization and neighborhood strategies, are rarely analyzed in a systematic and reproducible manner. A formal experimental framework is presented to systematically analyze logical structure design choices, which are typically based on empirical expertise, by isolating and evaluating the combined effects of methodologies in the logical parts of initialization and neighborhood, under controlled conditions, for the TA and TS algorithms solving the one-dimensional Bin Packing Problem (BPP). A total of 324 benchmark instances were used to assess multiple algorithmic variants. Performance was evaluated in terms of solution quality and computational effort, supported by graphical analysis and statistical methods, including Wilcoxon signed-rank tests, effect size measures, bootstrap-based confidence intervals, and linear regression. The experimental results consistently show that a simpler internal logical structure of the TA and TS algorithms, specifically a probability-guided initialization combined with a single neighborhood operator, can achieve a better balance between solution quality and computational effort than more complex alternatives on general BPP instances.</p>
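	<p>For orientation, the following compact Python sketch shows a Threshold Accepting variant for one-dimensional bin packing with a size-biased random initialization order and a single relocate-one-item neighborhood operator. It is our own illustration under assumed parameter values and an assumed initialization form, not the paper's code, and the TS counterpart is omitted:</p>
	<pre><code>
import random

def first_fit(items, cap, order):
    """First-fit over a given item order; returns per-item bin index and loads."""
    loads, assign = [], [0] * len(items)
    for i in order:
        for b in range(len(loads)):
            if loads[b] + items[i] <= cap:
                loads[b] += items[i]; assign[i] = b; break
        else:
            assign[i] = len(loads); loads.append(items[i])
    return assign, loads

def cost(loads):
    # Standard BPP surrogate objective: fuller bins score better.
    return -sum(l * l for l in loads)

def threshold_accepting(items, cap, steps=20000, t0=50.0, decay=0.9995, seed=7):
    rng = random.Random(seed)
    # Probability-guided initialization (assumed form): size-biased random order.
    order = sorted(range(len(items)), key=lambda i: -(items[i] + rng.random()))
    assign, loads = first_fit(items, cap, order)
    c, t = cost(loads), t0
    for _ in range(steps):
        i = rng.randrange(len(items))                    # single neighborhood:
        src, dst = assign[i], rng.randrange(len(loads))  # relocate one item
        if dst != src and loads[dst] + items[i] <= cap:
            loads[src] -= items[i]; loads[dst] += items[i]
            c2 = cost(loads)
            if c2 <= c + t:                              # threshold acceptance
                assign[i], c = dst, c2
            else:                                        # reject: undo the move
                loads[src] += items[i]; loads[dst] -= items[i]
        t *= decay
    return sum(1 for l in loads if l > 0)

# Lower bound for this toy instance is 4 bins (total size 40, capacity 10).
print(threshold_accepting([4, 8, 1, 4, 2, 1, 7, 3, 6, 4], cap=10))
</code></pre>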
	]]></content:encoded>

	<dc:title>Case Studies on the Logical Structure of the Algorithms Tabu Search and Threshold Accepting for Generating Solutions in Searching and Solving the Bin-Packing Problem</dc:title>
			<dc:creator>Vanesa Landero-Nájera</dc:creator>
			<dc:creator>Joaquín Pérez-Ortega</dc:creator>
			<dc:creator>Laura Cruz-Reyes</dc:creator>
			<dc:creator>Claudia Guadalupe Gómez-Santillán</dc:creator>
			<dc:creator>Nelva N. Almanza-Ortega</dc:creator>
			<dc:creator>Carlos Rodríguez-Orta</dc:creator>
			<dc:creator>Carlos Andrés Collazos-Morales</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050274</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>274</prism:startingPage>
		<prism:doi>10.3390/computers15050274</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/274</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/273">

	<title>Computers, Vol. 15, Pages 273: A GNN-Based Log Anomaly Detection Framework with Prompt Learning for Edge Computing</title>
	<link>https://www.mdpi.com/2073-431X/15/5/273</link>
	<description>System logs are critical for analyzing the operational status and abnormal behavior of highly distributed and heterogeneous edge computing nodes. In edge environments, logs exhibit cross-event and cross-field structural interactions, making it difficult to uncover potential anomaly patterns from isolated events. Moreover, sparse annotations and varying log formats limit the effectiveness of existing methods. To address these challenges, we propose a graph neural network (GNN) anomaly detection framework with prompt learning. It leverages few-shot prompt learning to automatically extract key fields and constructs a weighted directed graph that jointly models semantic embeddings and temporal dependencies, fully representing the structural interactions and semantic associations across events and fields. Furthermore, the framework performs graph-level anomaly detection by jointly optimizing graph representation learning and the classification objective within an enhanced one-class directed graph convolutional network, enabling effective identification of global structural anomaly patterns in log graphs. Experimental results demonstrate that the proposed method achieves an average F1-score of 93.3%, surpassing the current state-of-the-art (SOTA) methods by 6.93%.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 273: A GNN-Based Log Anomaly Detection Framework with Prompt Learning for Edge Computing</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/273">doi: 10.3390/computers15050273</a></p>
	<p>Authors:
		Xianlang Hu
		Guangsheng Feng
		Xinling Huang
		Xiangying Kong
		Hongwu Lv
		</p>
	<p>System logs are critical for analyzing the operational status and abnormal behavior of highly distributed and heterogeneous edge computing nodes. In edge environments, logs exhibit cross-event and cross-field structural interactions, making it difficult to uncover potential anomaly patterns from isolated events. Moreover, sparse annotations and varying log formats limit the effectiveness of existing methods. To address these challenges, we propose a graph neural network (GNN) anomaly detection framework with prompt learning. It leverages few-shot prompt learning to automatically extract key fields and constructs a weighted directed graph that jointly models semantic embeddings and temporal dependencies, fully representing the structural interactions and semantic associations across events and fields. Furthermore, the framework performs graph-level anomaly detection by jointly optimizing graph representation learning and the classification objective within an enhanced one-class directed graph convolutional network, enabling effective identification of global structural anomaly patterns in log graphs. Experimental results demonstrate that the proposed method achieves an average F1-score of 93.3%, surpassing the current state-of-the-art (SOTA) methods by 6.93%.</p>
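	<p>The graph-construction idea can be sketched as follows (a toy illustration with assumed event shapes and an assumed weighting rule, not the paper's pipeline): consecutive events contribute temporal edges, and events sharing extracted field values contribute cross-field edge weight.</p>
	<pre><code>
from collections import defaultdict

def build_log_graph(events):
    """events: list of dicts with extracted key fields, in timestamp order."""
    nodes = [frozenset(e.items()) for e in events]
    weights = defaultdict(float)
    for i in range(len(events) - 1):
        # temporal dependency: consecutive events get a directed edge
        weights[(i, i + 1)] += 1.0
        # cross-field interaction: shared field values strengthen edges
        for j in range(i + 1, len(events)):
            shared = len(nodes[i] & nodes[j])
            if shared:
                weights[(i, j)] += 0.5 * shared
    return weights

log = [
    {"node": "edge-3", "level": "INFO",  "op": "sync"},
    {"node": "edge-3", "level": "ERROR", "op": "sync"},
    {"node": "edge-7", "level": "ERROR", "op": "sync"},
]
for edge, w in sorted(build_log_graph(log).items()):
    print(edge, w)
</code></pre>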
	]]></content:encoded>

	<dc:title>A GNN-Based Log Anomaly Detection Framework with Prompt Learning for Edge Computing</dc:title>
			<dc:creator>Xianlang Hu</dc:creator>
			<dc:creator>Guangsheng Feng</dc:creator>
			<dc:creator>Xinling Huang</dc:creator>
			<dc:creator>Xiangying Kong</dc:creator>
			<dc:creator>Hongwu Lv</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050273</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>273</prism:startingPage>
		<prism:doi>10.3390/computers15050273</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/273</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/271">

	<title>Computers, Vol. 15, Pages 271: A Multi-Stage YOLOv11-Based Deep Learning Framework for Robust Instance Segmentation and Material Quantification of Mixed Plastic Waste</title>
	<link>https://www.mdpi.com/2073-431X/15/5/271</link>
	<description>Instance segmentation in heterogeneous waste scenes remains challenging due to object variability, deformable shapes, partial occlusion, and large appearance differences across packaging types. This study presents a YOLOv11-based deep learning framework for mixed plastic waste instance segmentation, developed to connect visual perception with reliable material quantification. The framework integrates curated instance-level annotations, strict split isolation, multi-stage optimization, training strategy ablation, and seed-robustness analysis to support reproducible model selection. Experimental results on a held-out test set show that the optimized model achieves a mask mAP@50:95 of 0.9337, indicating strong segmentation performance under heterogeneous waste-scene conditions. To extend the analysis beyond standard vision metrics, the framework incorporates a physics-informed mask-to-mass module that converts predicted masks into class-specific mass estimates using geometric calibration and material priors. Applied to a representative stream of 1253 detected objects, the system estimated a total plastic mass of 15.48 ± 1.08 kg, corresponding to a theoretical H2 potential of 0.41 ± 0.04 kg and a greenhouse-gas avoidance of 34.57 ± 4.15 kg CO2e. Overall, the proposed framework extends waste-scene understanding beyond vision-level assessment toward physically grounded, data-driven decision support for smart material recovery systems.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 271: A Multi-Stage YOLOv11-Based Deep Learning Framework for Robust Instance Segmentation and Material Quantification of Mixed Plastic Waste</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/271">doi: 10.3390/computers15050271</a></p>
	<p>Authors:
		Andrew N. Shafik
		Mohamed H. Khafagy
		Alber S. Aziz
		Shereen A. Hussein
		</p>
	<p>Instance segmentation in heterogeneous waste scenes remains challenging due to object variability, deformable shapes, partial occlusion, and large appearance differences across packaging types. This study presents a YOLOv11-based deep learning framework for mixed plastic waste instance segmentation, developed to connect visual perception with reliable material quantification. The framework integrates curated instance-level annotations, strict split isolation, multi-stage optimization, training strategy ablation, and seed-robustness analysis to support reproducible model selection. Experimental results on a held-out test set show that the optimized model achieves a mask mAP@50:95 of 0.9337, indicating strong segmentation performance under heterogeneous waste-scene conditions. To extend the analysis beyond standard vision metrics, the framework incorporates a physics-informed mask-to-mass module that converts predicted masks into class-specific mass estimates using geometric calibration and material priors. Applied to a representative stream of 1253 detected objects, the system estimated a total plastic mass of 15.48 ± 1.08 kg, corresponding to a theoretical H2 potential of 0.41 ± 0.04 kg and a greenhouse-gas avoidance of 34.57 ± 4.15 kg CO2e. Overall, the proposed framework extends waste-scene understanding beyond vision-level assessment toward physically grounded, data-driven decision support for smart material recovery systems.</p>
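	<p>A mask-to-mass step of this general form can be sketched in a few lines of Python. The calibration constant, class names, thicknesses, and densities below are placeholder assumptions for illustration, not the paper's calibrated values:</p>
	<pre><code>
PX_TO_CM2 = 0.0009          # camera calibration: area of one pixel in cm^2 (assumed)
PRIORS = {                  # material priors: (effective thickness cm, density g/cm^3)
    "PET_bottle": (0.035, 1.38),
    "HDPE_film":  (0.010, 0.95),
}

def mask_to_mass_g(mask_area_px, cls):
    thickness_cm, density = PRIORS[cls]
    area_cm2 = mask_area_px * PX_TO_CM2     # geometric calibration
    return area_cm2 * thickness_cm * density

detections = [("PET_bottle", 41000), ("HDPE_film", 98000)]   # (class, mask pixels)
total = sum(mask_to_mass_g(a, c) for c, a in detections)
print(round(total, 1), "g")
</code></pre>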
	]]></content:encoded>

	<dc:title>A Multi-Stage YOLOv11-Based Deep Learning Framework for Robust Instance Segmentation and Material Quantification of Mixed Plastic Waste</dc:title>
			<dc:creator>Andrew N. Shafik</dc:creator>
			<dc:creator>Mohamed H. Khafagy</dc:creator>
			<dc:creator>Alber S. Aziz</dc:creator>
			<dc:creator>Shereen A. Hussein</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050271</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>271</prism:startingPage>
		<prism:doi>10.3390/computers15050271</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/271</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/272">

	<title>Computers, Vol. 15, Pages 272: VIRTUOSO: A Multilayer Cloud Security and Risk Management Framework</title>
	<link>https://www.mdpi.com/2073-431X/15/5/272</link>
	<description>Despite its continued growth, cloud computing remains susceptible to significant security challenges, as shared virtualised environments pose threats at multiple levels. These vulnerabilities are caused by a lack of security coverage in the responsibility model between the provider and the tenant. In this work, we propose the multi-layered architecture VIRTUOSO (VIRTual Unified Operation Security Optimiser) to cover these security gaps through advanced automation and ML. VIRTUOSO has four layers. The Input Layer extracts key risk components from collected telemetry data. The Deep Automation Security Layer provides automated actions and continuous monitoring of security defences. Its counterpart, the Intelligent Security Layer, predicts threats using anomaly detection. The last layer, the Output Layer, returns an aggregated risk summary. The datasets we used were chosen for their relevance: the UNSW-NB15 dataset, a subset of the web-attack classification from CSE-CIC-IDS2018, and a sample of anonymised log events from AWS CloudTrail. Our ensemble classifiers achieve a best accuracy of 95.08% ± 0.13% on UNSW-NB15 (RF), with statistically significant differences among models confirmed by the Friedman test (p &lt; 0.004) and Nemenyi post hoc analysis, and 99.25% ± 0.52% on web-attack (CatBoost), where ensemble differences are not statistically significant (p = 0.093), consistent with the high separability of this dataset. The training-test gap and DNN curves show no overfitting, whereas our adversarial tests show a maximum accuracy loss of 8.1% at ε = 0.02. These promising results suggest that, pending verification in an actual cloud environment and potential integration with federated learning (FL), our ensemble classifier model is a viable real-world prototype.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 272: VIRTUOSO: A Multilayer Cloud Security and Risk Management Framework</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/272">doi: 10.3390/computers15050272</a></p>
	<p>Authors:
		 Anwar
		 Pastore
		 Abdullah
		</p>
	<p>Despite its continued growth, cloud computing remains susceptible to significant security challenges, as shared virtualised environments pose threats at multiple levels. These vulnerabilities are caused by a lack of security coverage in the responsibility model between the provider and the tenant. In this work, we propose the multi-layered architecture VIRTUOSO (VIRTual Unified Operation Security Optimiser) to cover these security gaps through advanced automation and ML. VIRTUOSO has four layers. The Input Layer extracts key risk components from collected telemetry data. The Deep Automation Security Layer provides automated actions and continuous monitoring of security defences. Its counterpart, the Intelligent Security Layer, predicts threats using anomaly detection. The last layer, the Output Layer, returns an aggregated risk summary. The datasets we used were chosen for their relevance: the UNSW-NB15 dataset, a subset of the web-attack classification from CSE-CIC-IDS2018, and a sample of anonymised log events from AWS CloudTrail. Our ensemble classifiers achieve a best accuracy of 95.08% ± 0.13% on UNSW-NB15 (RF), with statistically significant differences among models confirmed by the Friedman test (p &lt; 0.004) and Nemenyi post hoc analysis, and 99.25% ± 0.52% on web-attack (CatBoost), where ensemble differences are not statistically significant (p = 0.093), consistent with the high separability of this dataset. The training-test gap and DNN curves show no overfitting, whereas our adversarial tests show a maximum accuracy loss of 8.1% at ε = 0.02. These promising results suggest that, pending verification in an actual cloud environment and potential integration with federated learning (FL), our ensemble classifier model is a viable real-world prototype.</p>
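	<p>The model-comparison step described above uses a Friedman test over repeated measurements. A minimal sketch with SciPy follows; the per-fold accuracy numbers are synthetic stand-ins for illustration, not the paper's results:</p>
	<pre><code>
from scipy.stats import friedmanchisquare

# Synthetic per-fold accuracies for three classifiers (illustrative only).
rf       = [0.951, 0.949, 0.952, 0.950, 0.951]
catboost = [0.948, 0.946, 0.949, 0.947, 0.948]
dnn      = [0.940, 0.938, 0.941, 0.939, 0.942]

stat, p = friedmanchisquare(rf, catboost, dnn)
print(f"Friedman chi2={stat:.2f}, p={p:.4f}")  # small p: model ranks differ
</code></pre>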
	]]></content:encoded>

	<dc:title>VIRTUOSO: A Multilayer Cloud Security and Risk Management Framework</dc:title>
			<dc:creator> Anwar</dc:creator>
			<dc:creator> Pastore</dc:creator>
			<dc:creator> Abdullah</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050272</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>272</prism:startingPage>
		<prism:doi>10.3390/computers15050272</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/272</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/270">

	<title>Computers, Vol. 15, Pages 270: R-Snort: A Performance-Optimized Multi-Agent NIDS Architecture for SOHO and Edge-of-Things Networks Using Snort 3 on Raspberry Pi 5</title>
	<link>https://www.mdpi.com/2073-431X/15/5/270</link>
	<description>Network Intrusion Detection Systems (NIDSs) are critical to ensuring the resilience of modern digital infrastructures. Although traditionally deployed in large-scale corporate environments, the expanding threat landscape requires the integration of robust security measures into Small Office/Home Office (SOHO) and Edge-of-Things (EoT) networks. However, these environments often face significant constraints in terms of specialized hardware and technical expertise. This article presents R-Snort, an open-source NIDS based on Snort 3, optimized for low-cost Raspberry Pi 5 hardware. Its multi-agent architecture enables distributed deployment with centralized traffic analysis and cross-agent attack correlation, while an intuitive web interface simplifies alert visualization and system management for non-expert administrators. Its main contributions are: (1) a performance-optimized NIDS agent achieving 1 Gbps throughput; (2) a distributed multi-agent architecture enabling centralized event correlation and detection of multi-vector attacks; and (3) an IaC-based automated deployment framework with an intuitive web interface, democratizing professional-grade security for SOHO and EoT environments.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 270: R-Snort: A Performance-Optimized Multi-Agent NIDS Architecture for SOHO and Edge-of-Things Networks Using Snort 3 on Raspberry Pi 5</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/270">doi: 10.3390/computers15050270</a></p>
	<p>Authors:
		Julio Gómez López
		Deian Orlando Petrovics Tabacu
		Nicolás Padilla Soriano
		Alfredo Alcayde García
		</p>
	<p>Network Intrusion Detection Systems (NIDSs) are critical to ensuring the resilience of modern digital infrastructures. Although traditionally deployed in large-scale corporate environments, the expanding threat landscape requires the integration of robust security measures into Small Office/Home Office (SOHO) and Edge-of-Things (EoT) networks. However, these environments often face significant constraints in terms of specialized hardware and technical expertise. This article presents R-Snort, an open-source NIDS based on Snort 3, optimized for low-cost Raspberry Pi 5 hardware. Its multi-agent architecture enables distributed deployment with centralized traffic analysis and cross-agent attack correlation, while an intuitive web interface simplifies alert visualization and system management for non-expert administrators. Its main contributions are: (1) a performance-optimized NIDS agent achieving 1 Gbps throughput; (2) a distributed multi-agent architecture enabling centralized event correlation and detection of multi-vector attacks; and (3) an IaC-based automated deployment framework with an intuitive web interface, democratizing professional-grade security for SOHO and EoT environments.</p>
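	<p>As a rough intuition for the cross-agent correlation contribution, the hypothetical Python sketch below flags a source as a multi-vector attack when distinct agents report distinct signatures within a time window. The field names, rule, and thresholds are our assumptions, not R-Snort's implementation:</p>
	<pre><code>
from collections import defaultdict

WINDOW_S = 60   # assumed correlation window in seconds

def correlate(alerts):
    """alerts: (timestamp, agent_id, src_ip, signature) tuples, time-sorted."""
    by_src = defaultdict(list)
    incidents = []
    for ts, agent, src, sig in alerts:
        # keep only alerts for this source inside the sliding window
        by_src[src] = [a for a in by_src[src] if ts - a[0] <= WINDOW_S]
        by_src[src].append((ts, agent, sig))
        agents = {a for _, a, _ in by_src[src]}
        sigs = {s for _, _, s in by_src[src]}
        if len(agents) >= 2 and len(sigs) >= 2:   # multi-vector heuristic
            incidents.append((src, sorted(agents), sorted(sigs)))
    return incidents

alerts = [(0, "pi-1", "10.0.0.9", "portscan"),
          (20, "pi-2", "10.0.0.9", "http-exploit")]
print(correlate(alerts))
</code></pre>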
	]]></content:encoded>

	<dc:title>R-Snort: A Performance-Optimized Multi-Agent NIDS Architecture for SOHO and Edge-of-Things Networks Using Snort 3 on Raspberry Pi 5</dc:title>
			<dc:creator>Julio Gómez López</dc:creator>
			<dc:creator>Deian Orlando Petrovics Tabacu</dc:creator>
			<dc:creator>Nicolás Padilla Soriano</dc:creator>
			<dc:creator>Alfredo Alcayde García</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050270</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>270</prism:startingPage>
		<prism:doi>10.3390/computers15050270</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/270</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/269">

	<title>Computers, Vol. 15, Pages 269: Development and Evaluation of a Chatbot-Based System for Early Detection of Depression Indicators</title>
	<link>https://www.mdpi.com/2073-431X/15/5/269</link>
	<description>In this study, we developed a chatbot-based system for detecting early signs of depression and verified its effectiveness through experimental evaluations and user surveys. Notably, the system does not rely on medical checklists; instead, it is designed to automatically extract three linguistic features associated with depression—frequent use of first-person pronouns, pessimistic expressions, and obsessive-compulsive writing styles—from natural user conversations. Multiple models were constructed for these features, and an ensemble layer integrates their outputs for a comprehensive judgment. The implemented system analyzes input sentences obtained through chat, extracts the three categories of features, calculates a final score through an ensemble layer, and visualizes potential signs of depression based on the total score. We performed an evaluation experiment with 20 participants. In the test data evaluation, the system demonstrated over 76% accuracy in each of the three classification categories: first-person usage, pessimistic tendency, and obsessive-compulsive tendency.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 269: Development and Evaluation of a Chatbot-Based System for Early Detection of Depression Indicators</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/269">doi: 10.3390/computers15050269</a></p>
	<p>Authors:
		Min Yang
		Makoto Oka
		Hirohiko Mori
		</p>
	<p>In this study, we developed a chatbot-based system for detecting early signs of depression and verified its effectiveness through experimental evaluations and user surveys. Notably, the system does not rely on medical checklists; instead, it is designed to automatically extract three linguistic features associated with depression—frequent use of first-person pronouns, pessimistic expressions, and obsessive-compulsive writing styles—from natural user conversations. Multiple models were constructed for these features, and an ensemble layer integrates their outputs for a comprehensive judgment. The implemented system analyzes input sentences obtained through chat, extracts the three categories of features, calculates a final score through an ensemble layer, and visualizes potential signs of depression based on the total score. We performed an evaluation experiment with 20 participants. In the test data evaluation, the system demonstrated over 76% accuracy in each of the three classification categories: first-person usage, pessimistic tendency, and obsessive-compulsive tendency.</p>
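	<p>To illustrate the three-feature-plus-ensemble idea only (the study uses trained models, not lexicons), here is a deliberately crude Python toy; the word lists, repetition proxy, and weights are invented placeholders:</p>
	<pre><code>
import re

FIRST_PERSON = {"i", "me", "my", "mine", "myself"}          # assumed lexicon
PESSIMISTIC = {"hopeless", "tired", "worthless", "never", "alone"}  # assumed

def features(text):
    words = re.findall(r"[a-z']+", text.lower())
    fp = sum(w in FIRST_PERSON for w in words) / max(len(words), 1)
    pess = sum(w in PESSIMISTIC for w in words) / max(len(words), 1)
    # crude stand-in for obsessive-compulsive style: repeated-word ratio
    rep = 1 - len(set(words)) / max(len(words), 1)
    return fp, pess, rep

def ensemble_score(text, weights=(0.4, 0.4, 0.2)):          # assumed weights
    return sum(w * f for w, f in zip(weights, features(text)))

msg = "I am tired and I never feel fine, I am alone"
print(round(ensemble_score(msg), 3))
</code></pre>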
	]]></content:encoded>

	<dc:title>Development and Evaluation of a Chatbot-Based System for Early Detection of Depression Indicators</dc:title>
			<dc:creator>Min Yang</dc:creator>
			<dc:creator>Makoto Oka</dc:creator>
			<dc:creator>Hirohiko Mori</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050269</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>269</prism:startingPage>
		<prism:doi>10.3390/computers15050269</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/269</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/268">

	<title>Computers, Vol. 15, Pages 268: Learning Scientific Document Representations via Triple-Source Automatic Supervision Without Annotations or Citations</title>
	<link>https://www.mdpi.com/2073-431X/15/5/268</link>
	<description>Learning meaningful representations of scientific documents is essential for information retrieval, knowledge discovery, and recommendation systems. Traditional methods such as TF-IDF rely on lexical matching and fail to capture deeper semantic relationships, while transformer-based approaches typically depend on limited supervision signals. In this work, we propose a Triple-Source automatic supervision framework for learning document embeddings from scientific corpora. The model integrates three types of supervision—title–abstract pairs, same-category document pairs, and document-level semantic relationships—within a unified contrastive learning framework based on a multilingual XLM-RoBERTa encoder. Unlike prior approaches that rely on citation graphs or manual annotations, our method enables citation-free and annotation-free representation learning using only lightweight metadata. Experiments on a publicly available arXiv dataset consisting of 98,649 documents demonstrate improved semantic retrieval performance, achieving Recall@1 = 0.6181 for same-category retrieval and outperforming both TF-IDF and single-source transformer baselines. The learned embeddings also exhibit improved clustering of scientific domains, indicating more structured semantic representations.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 268: Learning Scientific Document Representations via Triple-Source Automatic Supervision Without Annotations or Citations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/268">doi: 10.3390/computers15050268</a></p>
	<p>Authors:
		Mussa Turdalyuly
		Ainur Tursynkhan
		Aigerim Yerimbetova
		Tolganay Turdalykyzy
		Bakzhan Sakenov
		Nurzhan Mukazhanov
		Nazerke Baisholan
		</p>
	<p>Learning meaningful representations of scientific documents is essential for information retrieval, knowledge discovery, and recommendation systems. Traditional methods such as TF-IDF rely on lexical matching and fail to capture deeper semantic relationships, while transformer-based approaches typically depend on limited supervision signals. In this work, we propose a Triple-Source automatic supervision framework for learning document embeddings from scientific corpora. The model integrates three types of supervision—title–abstract pairs, same-category document pairs, and document-level semantic relationships—within a unified contrastive learning framework based on a multilingual XLM-RoBERTa encoder. Unlike prior approaches that rely on citation graphs or manual annotations, our method enables citation-free and annotation-free representation learning using only lightweight metadata. Experiments on a publicly available arXiv dataset consisting of 98,649 documents demonstrate improved semantic retrieval performance, achieving Recall@1 = 0.6181 for same-category retrieval and outperforming both TF-IDF and single-source transformer baselines. The learned embeddings also exhibit improved clustering of scientific domains, indicating more structured semantic representations.</p>
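	<p>A minimal NumPy sketch of an in-batch contrastive (InfoNCE-style) objective over title–abstract pairs follows; random vectors stand in for encoder embeddings, and the temperature value is an assumption, so this shows the loss shape rather than the paper's training code:</p>
	<pre><code>
import numpy as np

def info_nce(titles, abstracts, tau=0.07):
    """titles, abstracts: (n, d) L2-normalized embeddings, row i paired."""
    sims = titles @ abstracts.T / tau           # (n, n) similarity logits
    sims -= sims.max(axis=1, keepdims=True)     # numerical stability
    log_probs = sims - np.log(np.exp(sims).sum(axis=1, keepdims=True))
    return -np.mean(np.diag(log_probs))         # matched pair is the positive

rng = np.random.default_rng(0)
t = rng.normal(size=(8, 32))
a = t + 0.1 * rng.normal(size=(8, 32))          # abstracts near their titles
t /= np.linalg.norm(t, axis=1, keepdims=True)
a /= np.linalg.norm(a, axis=1, keepdims=True)
print(round(float(info_nce(t, a)), 3))          # lower when pairs align
</code></pre>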
	]]></content:encoded>

	<dc:title>Learning Scientific Document Representations via Triple-Source Automatic Supervision Without Annotations or Citations</dc:title>
			<dc:creator>Mussa Turdalyuly</dc:creator>
			<dc:creator>Ainur Tursynkhan</dc:creator>
			<dc:creator>Aigerim Yerimbetova</dc:creator>
			<dc:creator>Tolganay Turdalykyzy</dc:creator>
			<dc:creator>Bakzhan Sakenov</dc:creator>
			<dc:creator>Nurzhan Mukazhanov</dc:creator>
			<dc:creator>Nazerke Baisholan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050268</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>268</prism:startingPage>
		<prism:doi>10.3390/computers15050268</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/268</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/267">

	<title>Computers, Vol. 15, Pages 267: Early Detection of Aggressive Human Behavior in Video Streams Using Deep Spatiotemporal Models</title>
	<link>https://www.mdpi.com/2073-431X/15/5/267</link>
	<description>In this paper, we propose a spatiotemporal approach for binary classification of violent and non-violent behavior in real-world settings. The experimental pipeline includes video preprocessing, stratified data splitting, generation of temporally structured clips, and comparative evaluation of baseline models, including a convolutional neural network. We also developed a Residual Adaptive Motion Temporal Binary Heat Network model that combines frame color characteristics, residual motion descriptions, temporal feature fusion, an early risk assessment mechanism, and interpretable localization maps. Experiments were conducted on a balanced dataset of 2000 video clips. The proposed model demonstrated the best early warning performance: a supervision rate of 0.6, an F1 score of 0.9527, and a balanced accuracy of 0.9533. With full supervision, the F1 score was 0.9342, and the area under the receiver operating characteristic curve (AUC) was 0.9871. The practical significance of the work is that the proposed approach can be used as a decision support tool for the preliminary identification of potentially dangerous video fragments with subsequent manual verification, without the assumption of autonomous use in high-risk scenarios.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 267: Early Detection of Aggressive Human Behavior in Video Streams Using Deep Spatiotemporal Models</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/267">doi: 10.3390/computers15050267</a></p>
	<p>Authors:
		Aida Issembayeva
		Anargul Shaushenova
		Ardak Nurpeisova
		Aidar Ispussinov
		Buldyryk Suleimenova
		Anargul Bekenova
		Aliya Satybaldieva
		Aigul Zholmukhanova
		Galiya Mauina
		</p>
	<p>In this paper, we propose a spatiotemporal approach for binary classification of violent and non-violent behavior in real-world settings. The experimental pipeline includes video preprocessing, stratified data splitting, generation of temporally structured clips, and comparative evaluation of baseline models, including a convolutional neural network. We also developed a Residual Adaptive Motion Temporal Binary Heat Network model that combines frame color characteristics, residual motion descriptions, temporal feature fusion, an early risk assessment mechanism, and interpretable localization maps. Experiments were conducted on a balanced dataset of 2000 video clips. The proposed model demonstrated the best early warning performance: a supervision rate of 0.6, an F1 score of 0.9527, and a balanced accuracy of 0.9533. With full supervision, the F1 score was 0.9342, and the area under the receiver operating characteristic curve (AUC) was 0.9871. The practical significance of the work is that the proposed approach can be used as a decision support tool for the preliminary identification of potentially dangerous video fragments with subsequent manual verification, without the assumption of autonomous use in high-risk scenarios.</p>
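	<p>The residual-motion input used alongside frame color can be illustrated with simple frame differencing; the sketch below uses synthetic frames and assumed tensor shapes, and is not the proposed network itself:</p>
	<pre><code>
import numpy as np

def residual_motion(clip):
    """clip: (t, h, w, 3) uint8 frames; returns (t-1, h, w) motion maps."""
    gray = clip.astype(np.float32).mean(axis=3)       # luminance proxy
    return np.abs(np.diff(gray, axis=0)) / 255.0      # residual between frames

rng = np.random.default_rng(1)
clip = rng.integers(0, 256, size=(16, 64, 64, 3), dtype=np.uint8)
motion = residual_motion(clip)
print(motion.shape, round(float(motion.mean()), 3))
</code></pre>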
	]]></content:encoded>

	<dc:title>Early Detection of Aggressive Human Behavior in Video Streams Using Deep Spatiotemporal Models</dc:title>
			<dc:creator>Aida Issembayeva</dc:creator>
			<dc:creator>Anargul Shaushenova</dc:creator>
			<dc:creator>Ardak Nurpeisova</dc:creator>
			<dc:creator>Aidar Ispussinov</dc:creator>
			<dc:creator>Buldyryk Suleimenova</dc:creator>
			<dc:creator>Anargul Bekenova</dc:creator>
			<dc:creator>Aliya Satybaldieva</dc:creator>
			<dc:creator>Aigul Zholmukhanova</dc:creator>
			<dc:creator>Galiya Mauina</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050267</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>267</prism:startingPage>
		<prism:doi>10.3390/computers15050267</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/267</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/266">

	<title>Computers, Vol. 15, Pages 266: Enhancing IoT Network Security: A BPSO-Optimized Attention-GRU Deep Learning Framework for Intrusion Detection</title>
	<link>https://www.mdpi.com/2073-431X/15/5/266</link>
	<description>The exponential expansion of computer networks, alongside the rapid development of the Internet of Things (IoT), has significantly increased the volume and complexity of transmitted data, emphasizing the need for robust network security measures to secure sensitive data and prevent unauthorized access or breaches. Intrusion Detection Systems (IDSs) have emerged as vital tools for protecting networks and IoT environments from threats. Various IDSs have been proposed in the literature; however, suboptimal feature learning, limited computational efficiency, and reliance on obsolete datasets pose significant challenges, limiting their effectiveness against evolving cyber threats. Moreover, traditional IDSs struggle to efficiently manage the high-dimensional and imbalanced nature of IoT network traffic data. To address these challenges, this research proposes a hybrid deep learning (DL)-based IDS integrating Binary Particle Swarm Optimization (BPSO), MultiHead Attention mechanisms (MHA), and a deep Gated Recurrent Unit (GRU) architecture, improving detection effectiveness while reducing computational overhead. Our proposed approach also utilizes a Target Sampling strategy to balance class distributions, enhancing the model’s ability to accurately identify minority attacks. The BPSO algorithm is employed to identify the most influential features from the high-dimensional network traffic datasets, enhancing model interpretability and supporting more efficient learning. This optimized feature subset is then fed into a GRU-based DL architecture augmented with MHA, which performs sequence processing and attention-based learning for intrusion detection. The performance of the proposed model is evaluated utilizing the BoT-IoT and the CIC-IDS2017 benchmark datasets, ensuring a comprehensive assessment of anomaly detection capabilities. Extensive experimental results demonstrate the superior performance of the proposed model, achieving a recall of 98.42% and 99.76%, with F1-scores of 98.94% and 99.76%, for binary classification and a recall of 99.79% and 98.69%, with F1-scores of 99.89% and 98.04%, for multiclass classification on the BoT-IoT and CIC-IDS2017 datasets, respectively, highlighting the effectiveness of our model in enhancing threat detection for computer networks and IoT environments in comparison to recent state-of-the-art IDSs.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 266: Enhancing IoT Network Security: A BPSO-Optimized Attention-GRU Deep Learning Framework for Intrusion Detection</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/266">doi: 10.3390/computers15050266</a></p>
	<p>Authors:
		Abdallah Elayan
		Michel Kadoch
		</p>
	<p>The exponential expansion of computer networks, alongside the rapid development of the Internet of Things (IoT), has significantly increased the volume and complexity of transmitted data, emphasizing the need for robust network security measures to secure sensitive data and prevent unauthorized access or breaches. Intrusion Detection Systems (IDSs) have emerged as vital tools for protecting networks and IoT environments from threats. Various IDSs have been proposed in the literature; however, suboptimal feature learning, limited computational efficiency, and reliance on obsolete datasets pose significant challenges, limiting their effectiveness against evolving cyber threats. Moreover, traditional IDSs struggle to efficiently manage the high-dimensional and imbalanced nature of IoT network traffic data. To address these challenges, this research proposes a hybrid deep learning (DL)-based IDS integrating Binary Particle Swarm Optimization (BPSO), MultiHead Attention mechanisms (MHA), and a deep Gated Recurrent Unit (GRU) architecture, improving detection effectiveness while reducing computational overhead. Our proposed approach also utilizes a Target Sampling strategy to balance class distributions, enhancing the model’s ability to accurately identify minority attacks. The BPSO algorithm is employed to identify the most influential features from the high-dimensional network traffic datasets, enhancing model interpretability and supporting more efficient learning. This optimized feature subset is then fed into a GRU-based DL architecture augmented with MHA, which performs sequence processing and attention-based learning for intrusion detection. The performance of the proposed model is evaluated utilizing the BoT-IoT and the CIC-IDS2017 benchmark datasets, ensuring a comprehensive assessment of anomaly detection capabilities. Extensive experimental results demonstrate the superior performance of the proposed model, achieving a recall of 98.42% and 99.76%, with F1-scores of 98.94% and 99.76%, for binary classification and a recall of 99.79% and 98.69%, with F1-scores of 99.89% and 98.04%, for multiclass classification on the BoT-IoT and CIC-IDS2017 datasets, respectively, highlighting the effectiveness of our model in enhancing threat detection for computer networks and IoT environments in comparison to recent state-of-the-art IDSs.</p>
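	<p>The BPSO feature-selection step can be sketched compactly in Python with the standard sigmoid transfer function. The fitness function below is a stand-in (toy relevance score minus a feature-count penalty) rather than the paper's GRU-based evaluation, and the coefficients are assumed:</p>
	<pre><code>
import random, math

def bpso(n_feats, relevance, particles=10, iters=50, seed=3):
    rng = random.Random(seed)
    pos = [[rng.random() < 0.5 for _ in range(n_feats)] for _ in range(particles)]
    vel = [[0.0] * n_feats for _ in range(particles)]
    def fitness(x):   # stand-in objective: reward relevance, penalize subset size
        return sum(r for r, keep in zip(relevance, x) if keep) - 0.2 * sum(x)
    pbest = [p[:] for p in pos]
    gbest = max(pos, key=fitness)[:]
    for _ in range(iters):
        for p in range(particles):
            for d in range(n_feats):
                vel[p][d] = (0.7 * vel[p][d]
                             + 1.4 * rng.random() * (pbest[p][d] - pos[p][d])
                             + 1.4 * rng.random() * (gbest[d] - pos[p][d]))
                s = 1 / (1 + math.exp(-vel[p][d]))     # sigmoid transfer
                pos[p][d] = rng.random() < s           # binary position update
            if fitness(pos[p]) > fitness(pbest[p]):
                pbest[p] = pos[p][:]
            if fitness(pos[p]) > fitness(gbest):
                gbest = pos[p][:]
    return gbest

relevance = [0.9, 0.1, 0.8, 0.05, 0.7, 0.02]
print(bpso(6, relevance))   # tends to keep the high-relevance features
</code></pre>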
	]]></content:encoded>

	<dc:title>Enhancing IoT Network Security: A BPSO-Optimized Attention-GRU Deep Learning Framework for Intrusion Detection</dc:title>
			<dc:creator>Abdallah Elayan</dc:creator>
			<dc:creator>Michel Kadoch</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050266</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>266</prism:startingPage>
		<prism:doi>10.3390/computers15050266</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/266</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/265">

	<title>Computers, Vol. 15, Pages 265: Pareto-Optimal Explainable Diagnosis Under Cost-Aware Parallel Reasoning</title>
	<link>https://www.mdpi.com/2073-431X/15/5/265</link>
	<description>Model-Based Diagnosis (MBD) is widely used to identify minimal conflicts and repair actions in constraint-based systems. Recent advances in parallel reasoning have significantly reduced runtime in large-scale models through speculative and multicore execution strategies. However, existing approaches primarily focus on computational efficiency and implicitly assume that minimal diagnoses are inherently suitable explanations for human decision makers. In complex configuration environments, minimality does not necessarily imply interpretability, as diagnoses may involve structurally dispersed or semantically heterogeneous constraints. To address this limitation, this paper introduces a multi-objective explainability-aware framework for parallel MBD. Diagnosis selection is formulated as a Pareto optimization problem balancing total computational cost and a formally defined interpretability penalty. Interpretability is quantified using graph-based structural dispersion, semantic entropy, hierarchical complexity, and ambiguity metrics. The proposed E-ParetoDiag algorithm computes non-dominated diagnoses and identifies balanced knee-point solutions without modifying the correctness guarantees of underlying diagnosis algorithms. Experimental evaluation on large-scale benchmark datasets demonstrates a measurable trade-off between runtime and interpretability, particularly in dense constraint systems. Comparative analysis against classical selection strategies shows that the proposed approach reduces structural dispersion by up to 18% while increasing computational cost by only 7%. Statistical validation confirms that these improvements are significant (p &lt; 0.01) in medium- and high-density scenarios. The results indicate that aggressive parallelism may improve computational efficiency while increasing explanation complexity, highlighting the need for multi-objective selection strategies. Overall, the proposed framework extends scalable symbolic reasoning toward a human-centered diagnosis paradigm and establishes a principled foundation for explainability-aware optimization in constraint-based systems.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 265: Pareto-Optimal Explainable Diagnosis Under Cost-Aware Parallel Reasoning</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/265">doi: 10.3390/computers15050265</a></p>
	<p>Authors:
		Ana Chacón-Luna
		Miguel Tupac-Yupanqui
		Nicolás Márquez
		Cristian Vidal-Silva
		</p>
	<p>Model-Based Diagnosis (MBD) is widely used to identify minimal conflicts and repair actions in constraint-based systems. Recent advances in parallel reasoning have significantly reduced runtime in large-scale models through speculative and multicore execution strategies. However, existing approaches primarily focus on computational efficiency and implicitly assume that minimal diagnoses are inherently suitable explanations for human decision makers. In complex configuration environments, minimality does not necessarily imply interpretability, as diagnoses may involve structurally dispersed or semantically heterogeneous constraints. To address this limitation, this paper introduces a multi-objective explainability-aware framework for parallel MBD. Diagnosis selection is formulated as a Pareto optimization problem balancing total computational cost and a formally defined interpretability penalty. Interpretability is quantified using graph-based structural dispersion, semantic entropy, hierarchical complexity, and ambiguity metrics. The proposed E-ParetoDiag algorithm computes non-dominated diagnoses and identifies balanced knee-point solutions without modifying the correctness guarantees of underlying diagnosis algorithms. Experimental evaluation on large-scale benchmark datasets demonstrates a measurable trade-off between runtime and interpretability, particularly in dense constraint systems. Comparative analysis against classical selection strategies shows that the proposed approach reduces structural dispersion by up to 18% while increasing computational cost by only 7%. Statistical validation confirms that these improvements are significant (p &lt; 0.01) in medium- and high-density scenarios. The results indicate that aggressive parallelism may improve computational efficiency while increasing explanation complexity, highlighting the need for multi-objective selection strategies. Overall, the proposed framework extends scalable symbolic reasoning toward a human-centered diagnosis paradigm and establishes a principled foundation for explainability-aware optimization in constraint-based systems.</p>
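	<p>The selection step (non-dominated filtering over cost and interpretability penalty, then a knee point) can be sketched as follows. The candidate values are illustrative and objective normalization is omitted; this is not the E-ParetoDiag algorithm itself:</p>
	<pre><code>
# Each candidate diagnosis is (computational cost, interpretability penalty);
# both objectives are minimized.
def pareto_front(cands):
    return [c for c in cands
            if not any(o[0] <= c[0] and o[1] <= c[1] and o != c for o in cands)]

def knee(front):
    lo = (min(c[0] for c in front), min(c[1] for c in front))   # ideal point
    return min(front, key=lambda c: (c[0] - lo[0])**2 + (c[1] - lo[1])**2)

diagnoses = [(1.0, 9.0), (2.0, 4.0), (3.0, 3.5), (6.0, 1.0), (7.0, 6.0)]
front = pareto_front(diagnoses)
print(front, "->", knee(front))   # (7.0, 6.0) is dominated and dropped
</code></pre>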
	]]></content:encoded>

	<dc:title>Pareto-Optimal Explainable Diagnosis Under Cost-Aware Parallel Reasoning</dc:title>
			<dc:creator>Ana Chacón-Luna</dc:creator>
			<dc:creator>Miguel Tupac-Yupanqui</dc:creator>
			<dc:creator>Nicolás Márquez</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050265</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>265</prism:startingPage>
		<prism:doi>10.3390/computers15050265</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/265</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/264">

	<title>Computers, Vol. 15, Pages 264: Biometric Embedded Non-Blind Color Image Watermarking with Geometric Tamper Resistance via SIFT-ORB Keypoint Matching</title>
	<link>https://www.mdpi.com/2073-431X/15/5/264</link>
	<description>This work introduces a non-blind watermarking framework for color images to address tamper detection, particularly under geometric transformations. The proposed scheme fuses two watermarks, a personal signature and a biometric fingerprint, into a unified composite watermark embedded into the chrominance component of the cover image using a multi-level transform domain approach, discrete wavelet transforms (DWTs), discrete cosine transforms (DCTs), and singular value decomposition (SVD). By leveraging the rotation-invariant properties of scale-invariant feature transform (SIFT) and oriented FAST and rotated BRIEF (ORB) descriptors, the framework ensures robust tamper detection without requiring alignment, thus mitigating the limitations of conventional detection techniques vulnerable to transformation-induced tamper obfuscation (TITO). Extensive experimentation demonstrates that the method maintains high perceptual fidelity, achieving PSNR values ranging from 50 to 55 dB for embedding strength factor μ (0.01–0.04) and SSIM indices near 1 across multiple benchmark images. Furthermore, the scheme exhibits notable resilience to a range of image processing attacks and geometric distortion. Comparative evaluation reveals its superiority over existing grayscale, color, SIFT-based and DWT-DCT-SVD-based watermarking techniques, affirming its applicability in scenarios demanding secure, imperceptible, and transformation-invariant image watermarking.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 264: Biometric Embedded Non-Blind Color Image Watermarking with Geometric Tamper Resistance via SIFT-ORB Keypoint Matching</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/264">doi: 10.3390/computers15050264</a></p>
	<p>Authors:
		Swapnaneel Dhar
		Riyanka Manna
		Khaldi Amine
		Aditya Kumar Sahu
		</p>
	<p>This work introduces a non-blind watermarking framework for color images to address tamper detection, particularly under geometric transformations. The proposed scheme fuses two watermarks, a personal signature and a biometric fingerprint, into a unified composite watermark embedded into the chrominance component of the cover image using a multi-level transform domain approach, discrete wavelet transforms (DWTs), discrete cosine transforms (DCTs), and singular value decomposition (SVD). By leveraging the rotation-invariant properties of scale-invariant feature transform (SIFT) and oriented FAST and rotated BRIEF (ORB) descriptors, the framework ensures robust tamper detection without requiring alignment, thus mitigating the limitations of conventional detection techniques vulnerable to transformation-induced tamper obfuscation (TITO). Extensive experimentation demonstrates that the method maintains high perceptual fidelity, achieving PSNR values ranging from 50 to 55 dB for embedding strength factor μ (0.01–0.04) and SSIM indices near 1 across multiple benchmark images. Furthermore, the scheme exhibits notable resilience to a range of image processing attacks and geometric distortion. Comparative evaluation reveals its superiority over existing grayscale, color, SIFT-based and DWT-DCT-SVD-based watermarking techniques, affirming its applicability in scenarios demanding secure, imperceptible, and transformation-invariant image watermarking.</p>
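	<p>As a reduced sketch of the SVD embedding step only (the full scheme also applies DWT and DCT and uses SIFT/ORB matching for tamper checks, all omitted here), the singular values of a stand-in chrominance block are perturbed by the watermark at strength μ, and recovered non-blindly using side information kept from embedding:</p>
	<pre><code>
import numpy as np

def embed(block, wm, mu=0.02):
    U, S, Vt = np.linalg.svd(block, full_matrices=False)
    marked = U @ np.diag(S + mu * wm) @ Vt    # S' = S + mu * W
    return marked, (U, S, Vt)                 # side info: non-blind scheme

def extract(marked, side, mu=0.02):
    U, S, Vt = side
    S_m = np.diag(U.T @ marked @ Vt.T)        # recover perturbed spectrum
    return (S_m - S) / mu

rng = np.random.default_rng(5)
block = rng.uniform(0, 255, size=(8, 8))      # stand-in chrominance block
wm = rng.uniform(0, 1, size=8)                # composite watermark slice
marked, side = embed(block, wm)
print(np.allclose(extract(marked, side), wm))   # True
</code></pre>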
	]]></content:encoded>

	<dc:title>Biometric Embedded Non-Blind Color Image Watermarking with Geometric Tamper Resistance via SIFT-ORB Keypoint Matching</dc:title>
			<dc:creator>Swapnaneel Dhar</dc:creator>
			<dc:creator>Riyanka Manna</dc:creator>
			<dc:creator>Khaldi Amine</dc:creator>
			<dc:creator>Aditya Kumar Sahu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050264</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>264</prism:startingPage>
		<prism:doi>10.3390/computers15050264</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/264</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/263">

	<title>Computers, Vol. 15, Pages 263: BlackBoxTestGen: An Automatic Black-Box Test Case Generation Framework</title>
	<link>https://www.mdpi.com/2073-431X/15/5/263</link>
	<description>Software testing is essential for software engineering practices, as it ensures that the final software product is reliable and satisfies all requirements before delivery. However, manually designing black-box test cases is time-consuming, inconsistent, and difficult to maintain in accordance with changing specifications. Therefore, this paper presents BlackBoxTestGen, an automatic framework that unifies three specification-driven black-box testing techniques: rule-based Equivalence Class Partitioning (ECP), syntax testing, and state transition testing. The framework utilises a redesigned XML structure for test case generation to be shared among a data dictionary, decision tree, and state machine, used by each testing technique. The degree of testing coverage is accumulatively calculated during the test case generation process. The beneficial value of our proposed framework was demonstrated with the development of a web-based prototype tool. We rigorously evaluated its performance in terms of accuracy, computational efficiency, and scalability through a multidimensional approach. This included assessment by professional experts, algorithmic stress testing via parameter scaling, and application to close-to-realistic case studies. The results indicate that BlackBoxTestGen provides a robust integration of testing techniques. By automating the generation of compact and reproducible test cases, the framework substantially reduces manual effort and minimises drift between techniques.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 263: BlackBoxTestGen: An Automatic Black-Box Test Case Generation Framework</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/263">doi: 10.3390/computers15050263</a></p>
	<p>Authors:
		Adisak Intana
		Kuljaree Tantayakul
		Pongsakorn Kaewnaka
		</p>
	<p>Software testing is essential for software engineering practices, as it ensures that the final software product is reliable and satisfies all requirements before delivery. However, manually designing black-box test cases is time-consuming, inconsistent, and difficult to maintain in accordance with changing specifications. Therefore, this paper presents BlackBoxTestGen, an automatic framework that unifies three specification-driven black-box testing techniques: rule-based Equivalence Class Partitioning (ECP), syntax testing, and state transition testing. The framework utilises a redesigned XML structure for test case generation to be shared among a data dictionary, decision tree, and state machine, used by each testing technique. The degree of testing coverage is accumulatively calculated during the test case generation process. The beneficial value of our proposed framework was demonstrated with the development of a web-based prototype tool. We rigorously evaluated its performance in terms of accuracy, computational efficiency, and scalability through a multidimensional approach. This included assessment by professional experts, algorithmic stress testing via parameter scaling, and application to close-to-realistic case studies. The results indicate that BlackBoxTestGen provides a robust integration of testing techniques. By automating the generation of compact and reproducible test cases, the framework substantially reduces manual effort and minimises drift between techniques.</p>
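	<p>The ECP part of such a pipeline can be illustrated in a few lines: one representative value per equivalence class (below range, in range, above range) is generated from a data-dictionary entry. The field-spec format below is our assumption, not the tool's XML structure:</p>
	<pre><code>
def ecp_cases(field, lo, hi):
    """One representative per class: below-range, in-range, above-range."""
    return [
        {"field": field, "value": lo - 1, "expect": "reject"},   # invalid low
        {"field": field, "value": (lo + hi) // 2, "expect": "accept"},
        {"field": field, "value": hi + 1, "expect": "reject"},   # invalid high
    ]

spec = {"age": (18, 65), "quantity": (1, 99)}     # assumed data dictionary
cases = [c for f, (lo, hi) in spec.items() for c in ecp_cases(f, lo, hi)]
print(len(cases), "test cases;", cases[0])
</code></pre>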
	]]></content:encoded>

	<dc:title>BlackBoxTestGen: An Automatic Black-Box Test Case Generation Framework</dc:title>
			<dc:creator>Adisak Intana</dc:creator>
			<dc:creator>Kuljaree Tantayakul</dc:creator>
			<dc:creator>Pongsakorn Kaewnaka</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050263</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>263</prism:startingPage>
		<prism:doi>10.3390/computers15050263</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/263</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/262">

	<title>Computers, Vol. 15, Pages 262: Model-Contingent Polarity Bias in Large Language Model Annotation: Implications for Semantic Multimedia Personalization</title>
	<link>https://www.mdpi.com/2073-431X/15/5/262</link>
	<description>Large Language Models (LLMs) are increasingly deployed as automated annotators in semantic multimedia systems, yet their reliability varies significantly across architectures. This study extends prior cross-model evaluations by benchmarking ChatGPT-5, Qwen-3, and Gemini-3-flash against human expert annotations using the HRAST hotel review dataset. We adopt a bias-by-design framework to analyze systematic divergences in sentiment, topic, and aspect labeling across real and synthetic data, while investigating the moderating effects of annotation mode. Findings reveal model-contingent polarity bias: ChatGPT-5 exhibits a pronounced neutrality bias, while Qwen-3 and Gemini-3-flash align more closely with human polarization. Agreement is substantial for concrete topics but diverges on abstract evaluative dimensions. Synthetic data consistently inflates reliability metrics while masking ambiguity. These findings highlight that annotation bias is structurally embedded in model design choices and operational conditions. Cross-architectural triangulation and mode-aware deployment strategies are recommended for robust semantic multimedia system development.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 262: Model-Contingent Polarity Bias in Large Language Model Annotation: Implications for Semantic Multimedia Personalization</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/262">doi: 10.3390/computers15050262</a></p>
	<p>Authors:
		Constantinos Djouvas
		Christiana Andreou
		Maria C. Voutsa
		Nicolas Tsapatsoulis
		</p>
	<p>Large Language Models (LLMs) are increasingly deployed as automated annotators in semantic multimedia systems, yet their reliability varies significantly across architectures. This study extends prior cross-model evaluations by benchmarking ChatGPT-5, Qwen-3, and Gemini-3-flash against human expert annotations using the HRAST hotel review dataset. We adopt a bias-by-design framework to analyze systematic divergences in sentiment, topic, and aspect labeling across real and synthetic data, while investigating the moderating effects of annotation mode. Findings reveal model-contingent polarity bias: ChatGPT-5 exhibits a pronounced neutrality bias, while Qwen-3 and Gemini-3-flash align more closely with human polarization. Agreement is substantial for concrete topics but diverges on abstract evaluative dimensions. Synthetic data consistently inflates reliability metrics while masking ambiguity. These findings highlight that annotation bias is structurally embedded in model design choices and operational conditions. Cross-architectural triangulation and mode-aware deployment strategies are recommended for robust semantic multimedia system development.</p>
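	<p>A minimal sketch of the kind of agreement analysis such a benchmarking study relies on: Cohen's kappa between two label sequences, here with invented human and model annotations. The study's exact metrics and labels may differ.</p>
	<pre><code>
# Cohen's kappa: chance-corrected agreement between two annotators.
from collections import Counter

def cohen_kappa(a, b):
    n = len(a)
    po = sum(x == y for x, y in zip(a, b)) / n      # observed agreement
    ca, cb = Counter(a), Counter(b)
    pe = sum(ca[k] * cb[k] for k in ca) / (n * n)   # chance agreement
    return (po - pe) / (1 - pe)

# Hypothetical sentiment labels; not the HRAST data.
human = ["pos", "neu", "neg", "pos", "neu", "neu"]
model = ["pos", "neu", "neu", "pos", "neu", "neg"]
print(round(cohen_kappa(human, model), 3))
</code></pre>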
	]]></content:encoded>

	<dc:title>Model-Contingent Polarity Bias in Large Language Model Annotation: Implications for Semantic Multimedia Personalization</dc:title>
			<dc:creator>Constantinos Djouvas</dc:creator>
			<dc:creator>Christiana Andreou</dc:creator>
			<dc:creator>Maria C. Voutsa</dc:creator>
			<dc:creator>Nicolas Tsapatsoulis</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050262</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>262</prism:startingPage>
		<prism:doi>10.3390/computers15050262</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/262</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/5/261">

	<title>Computers, Vol. 15, Pages 261: DaN: A Comprehensive Semi-Real Dataset for Extreme Low-Light Image Enhancement</title>
	<link>https://www.mdpi.com/2073-431X/15/5/261</link>
	<description>Extreme low-light image enhancement (ELLIE) targets the restoration of visual quality in ultra-dim environments (&lt;0.1 lux). Conventional image signal processing (ISP) pipelines often fail in such scenarios due to the limitations of heuristic, hand-crafted algorithms. While deep learning has advanced the field via end-to-end mapping, existing models suffer from constrained generalization and suboptimal perceptual fidelity, primarily stemming from the scarcity of large-scale, high-diversity datasets. To bridge this gap, we present the Day and Night (DaN) dataset, a semi-synthetic benchmark synthesized through a rigorous physics-based noise model. This approach effectively captures authentic noise characteristics while enabling the scalable generation of paired samples across diverse illumination conditions and scenes. Furthermore, we propose No Longer Vigil (NLV), a fully differentiable AI-ISP framework. By replacing traditional rigid blocks with adaptive non-linear networks, NLV facilitates scene-dependent transformations without requiring manual priors. Comprehensive evaluations demonstrate that our method significantly outperforms state-of-the-art approaches, yielding a 4.15 dB gain in PSNR and a 0.026 improvement in SSIM.</description>
	<pubDate>2026-04-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 261: DaN: A Comprehensive Semi-Real Dataset for Extreme Low-Light Image Enhancement</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/5/261">doi: 10.3390/computers15050261</a></p>
	<p>Authors:
		Qiuyang Sun
		Shaonan Liu
		Hong Li
		Yingchao Feng
		Liuqing Sun
		Kun Lu
		Kangtai Liu
		</p>
	<p>Extreme low-light image enhancement (ELLIE) targets the restoration of visual quality in ultra-dim environments (&lt;0.1 lux). Conventional image signal processing (ISP) pipelines often fail in such scenarios due to the limitations of heuristic, hand-crafted algorithms. While deep learning has advanced the field via end-to-end mapping, existing models suffer from constrained generalization and suboptimal perceptual fidelity, primarily stemming from the scarcity of large-scale, high-diversity datasets. To bridge this gap, we present the Day and Night (DaN) dataset, a semi-synthetic benchmark synthesized through a rigorous physics-based noise model. This approach effectively captures authentic noise characteristics while enabling the scalable generation of paired samples across diverse illumination conditions and scenes. Furthermore, we propose No Longer Vigil (NLV), a fully differentiable AI-ISP framework. By replacing traditional rigid blocks with adaptive non-linear networks, NLV facilitates scene-dependent transformations without requiring manual priors. Comprehensive evaluations demonstrate that our method significantly outperforms state-of-the-art approaches, yielding a 4.15 dB gain in PSNR and a 0.026 improvement in SSIM.</p>
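	<p>A minimal sketch, in the spirit of the physics-based synthesis described above, of generating a paired low-light sample by darkening a clean image and applying Poisson shot noise and Gaussian read noise; the gain and noise parameters are illustrative, not the paper's calibrated values.</p>
	<pre><code>
import numpy as np

def synthesize_low_light(clean, exposure=0.02, full_well=1000.0, read_sigma=2.0):
    # Convert normalized intensities to expected photon counts, then add
    # signal-dependent shot noise and signal-independent read noise.
    rng = np.random.default_rng(0)
    photons = clean * exposure * full_well
    shot = rng.poisson(photons).astype(np.float64)
    read = rng.normal(0.0, read_sigma, clean.shape)
    return np.clip((shot + read) / full_well, 0.0, 1.0)

clean = np.random.default_rng(1).random((4, 4))  # stand-in for an image in [0, 1]
print(synthesize_low_light(clean))
</code></pre>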
	]]></content:encoded>

	<dc:title>DaN: A Comprehensive Semi-Real Dataset for Extreme Low-Light Image Enhancement</dc:title>
			<dc:creator>Qiuyang Sun</dc:creator>
			<dc:creator>Shaonan Liu</dc:creator>
			<dc:creator>Hong Li</dc:creator>
			<dc:creator>Yingchao Feng</dc:creator>
			<dc:creator>Liuqing Sun</dc:creator>
			<dc:creator>Kun Lu</dc:creator>
			<dc:creator>Kangtai Liu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15050261</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-22</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-22</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>261</prism:startingPage>
		<prism:doi>10.3390/computers15050261</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/5/261</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/260">

	<title>Computers, Vol. 15, Pages 260: Verification of the Methods of Digital Monitoring of Information Space Based on Coding Theory Tools</title>
	<link>https://www.mdpi.com/2073-431X/15/4/260</link>
	<description>This study examines the applicability of coding-theoretic tools to the digital monitoring of information space. The proposed approach treats response patterns to socially significant stimuli as binary sequences and interprets their analysis as a classification problem analogous to error correction in coding theory. To verify the feasibility of this framework, a model psychological test consisting of seven binary questions was analyzed using a procedure derived from the (7,4) Hamming code. The method makes it possible to map the full space of observed answer combinations onto a smaller set of reference codewords and thereby identify stable response configurations. The obtained results show that the distributions produced after coding-based transformation are markedly non-uniform and contain recurrent maxima, indicating the presence of structured patterns in collective responses. It is also shown that permutations of question order substantially affect the resulting distributions and correlation indicators, which highlights both the sensitivity and the analytical potential of the proposed encoding scheme. The main contribution of the study is methodological: it demonstrates that error-correcting coding can be operationalized as a formal tool for detecting latent regularities in simplified monitoring data. At the same time, the present results should be regarded as proof of concept, since further work is required to validate the approach on larger datasets, compare it with baseline classification methods, and extend it to longer and multivalued response sequences.</description>
	<pubDate>2026-04-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 260: Verification of the Methods of Digital Monitoring of Information Space Based on Coding Theory Tools</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/260">doi: 10.3390/computers15040260</a></p>
	<p>Authors:
		Dina Shaltykova
		Akhat Bakirov
		Anastasiya Grishina
		Mariya Kostsova
		Yelizaveta Vitulyova
		Ibragim Suleimenov
		</p>
	<p>This study examines the applicability of coding-theoretic tools to the digital monitoring of information space. The proposed approach treats response patterns to socially significant stimuli as binary sequences and interprets their analysis as a classification problem analogous to error correction in coding theory. To verify the feasibility of this framework, a model psychological test consisting of seven binary questions was analyzed using a procedure derived from the (7,4) Hamming code. The method makes it possible to map the full space of observed answer combinations onto a smaller set of reference codewords and thereby identify stable response configurations. The obtained results show that the distributions produced after coding-based transformation are markedly non-uniform and contain recurrent maxima, indicating the presence of structured patterns in collective responses. It is also shown that permutations of question order substantially affect the resulting distributions and correlation indicators, which highlights both the sensitivity and the analytical potential of the proposed encoding scheme. The main contribution of the study is methodological: it demonstrates that error-correcting coding can be operationalized as a formal tool for detecting latent regularities in simplified monitoring data. At the same time, the present results should be regarded as proof of concept, since further work is required to validate the approach on larger datasets, compare it with baseline classification methods, and extend it to longer and multivalued response sequences.</p>
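	<p>The codeword-mapping step described above can be made concrete with a short syndrome-decoding sketch for the (7,4) Hamming code: the syndrome of a 7-bit response vector directly indexes the single answer to flip, mapping all 128 possible response patterns onto 16 reference codewords. Variable names are illustrative.</p>
	<pre><code>
import numpy as np

# Parity-check matrix of the (7,4) Hamming code; column j (1-indexed)
# is the binary representation of j, so a nonzero syndrome equals the
# position of a single flipped bit.
H = np.array([[0, 0, 0, 1, 1, 1, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [1, 0, 1, 0, 1, 0, 1]])

def nearest_codeword(r):
    """Map a 7-bit response vector to its nearest Hamming codeword."""
    r = np.array(r) % 2
    syndrome = H @ r % 2
    pos = int(syndrome[0] * 4 + syndrome[1] * 2 + syndrome[2])  # 0 = no error
    if pos:
        r[pos - 1] ^= 1   # flip the single most likely deviating answer
    return r

responses = [1, 0, 1, 1, 0, 1, 1]   # differs from a codeword in one answer
print(nearest_codeword(responses))
</code></pre>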
	]]></content:encoded>

	<dc:title>Verification of the Methods of Digital Monitoring of Information Space Based on Coding Theory Tools</dc:title>
			<dc:creator>Dina Shaltykova</dc:creator>
			<dc:creator>Akhat Bakirov</dc:creator>
			<dc:creator>Anastasiya Grishina</dc:creator>
			<dc:creator>Mariya Kostsova</dc:creator>
			<dc:creator>Yelizaveta Vitulyova</dc:creator>
			<dc:creator>Ibragim Suleimenov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040260</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-21</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-21</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>260</prism:startingPage>
		<prism:doi>10.3390/computers15040260</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/260</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/259">

	<title>Computers, Vol. 15, Pages 259: A Reproducible Hybrid Architecture of Fuzzy Logic and XGBoost for Explainable Tabular Classification of Territorial Vulnerability</title>
	<link>https://www.mdpi.com/2073-431X/15/4/259</link>
	<description>This study proposes a reproducible hybrid computational model for the explainable classification of territorial vulnerability using heterogeneous tabular data. The approach integrates fuzzy logic and extreme gradient boosting in a two-stage architecture that balances interpretability and predictive performance. First, a fuzzy transformation is applied to construct interpretable risk and resilience indicators from multi-source administrative data. The analytical dataset was formed by integrating 11 heterogeneous administrative sources into a single matrix of 166 territorial units and 76 features. The model was evaluated on a stratified 75/25 train/test split using the F1 score, ROC-AUC, precision, recall, and an integrated quality criterion. Experimental results show that the proposed Fuzzy-XGBoost framework achieved an F1 score of 0.7333 on the test dataset, an ROC-AUC of 0.8291, and an Integrated Score of 0.768, outperforming the strongest baseline and improving recall in highly vulnerable areas. Furthermore, probabilistic threshold optimization identified an operating point at τ = 0.35, reducing the number of missed high-risk cases while maintaining acceptable specificity. The results demonstrate that fuzzy feature expansion combined with gradient boosting provides an efficient and interpretable solution for tabular risk classification and decision support problems under heterogeneity and uncertainty.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 259: A Reproducible Hybrid Architecture of Fuzzy Logic and XGBoost for Explainable Tabular Classification of Territorial Vulnerability</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/259">doi: 10.3390/computers15040259</a></p>
	<p>Authors:
		Aiman Akynbekova
		Ayagoz Mukhanova
		Raikhan Muratkhan
		Lunara Diyarova
		Saya Baigubenova
		Gulden Murzabekova
		Gulaim Orazymbetova
		Aliya Satybaldieva
		Zhanat Abdikadyr
		</p>
	<p>This study proposes a reproducible hybrid computational model for the explainable classification of territorial vulnerability using heterogeneous tabular data. The approach integrates fuzzy logic and extreme gradient boosting in a two-stage architecture that balances interpretability and predictive performance. First, a fuzzy transformation is applied to construct interpretable risk and resilience indicators from multi-source administrative data. The analytical dataset was formed by integrating 11 heterogeneous administrative sources into a single matrix of 166 territorial units and 76 features. The model was evaluated on a stratified 75/25 train/test split using the F1 score, ROC-AUC, precision, recall, and an integrated quality criterion. Experimental results show that the proposed Fuzzy-XGBoost framework achieved an F1 score of 0.7333 on the test dataset, an ROC-AUC of 0.8291, and an Integrated Score of 0.768, outperforming the strongest baseline and improving recall in highly vulnerable areas. Furthermore, probabilistic threshold optimization identified an operating point at τ = 0.35, reducing the number of missed high-risk cases while maintaining acceptable specificity. The results demonstrate that fuzzy feature expansion combined with gradient boosting provides an efficient and interpretable solution for tabular risk classification and decision support problems under heterogeneity and uncertainty.</p>
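	<p>A small sketch of the probabilistic threshold-selection step reported above: sweeping an operating threshold over predicted scores and reading off recall and specificity at each candidate point. The scores and labels are synthetic stand-ins, not the study's data.</p>
	<pre><code>
import numpy as np

def operating_points(y_true, scores, thresholds):
    # For each candidate threshold, classify and report the
    # recall/specificity trade-off it implies.
    y_true = np.asarray(y_true)
    for tau in thresholds:
        pred = (scores >= tau).astype(int)
        tp = np.sum((pred == 1) * (y_true == 1))
        fn = np.sum((pred == 0) * (y_true == 1))
        tn = np.sum((pred == 0) * (y_true == 0))
        fp = np.sum((pred == 1) * (y_true == 0))
        recall = tp / (tp + fn)
        specificity = tn / (tn + fp)
        print(f"tau={tau:.2f}  recall={recall:.2f}  specificity={specificity:.2f}")

y = [1, 0, 1, 1, 0, 0, 1, 0]                                  # synthetic labels
s = np.array([0.9, 0.4, 0.45, 0.7, 0.2, 0.5, 0.38, 0.1])      # synthetic scores
operating_points(y, s, [0.35, 0.50])
</code></pre>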
	]]></content:encoded>

	<dc:title>A Reproducible Hybrid Architecture of Fuzzy Logic and XGBoost for Explainable Tabular Classification of Territorial Vulnerability</dc:title>
			<dc:creator>Aiman Akynbekova</dc:creator>
			<dc:creator>Ayagoz Mukhanova</dc:creator>
			<dc:creator>Raikhan Muratkhan</dc:creator>
			<dc:creator>Lunara Diyarova</dc:creator>
			<dc:creator>Saya Baigubenova</dc:creator>
			<dc:creator>Gulden Murzabekova</dc:creator>
			<dc:creator>Gulaim Orazymbetova</dc:creator>
			<dc:creator>Aliya Satybaldieva</dc:creator>
			<dc:creator>Zhanat Abdikadyr</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040259</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>259</prism:startingPage>
		<prism:doi>10.3390/computers15040259</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/259</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/258">

	<title>Computers, Vol. 15, Pages 258: Colonic Polyp Detection with Object Detection Models</title>
	<link>https://www.mdpi.com/2073-431X/15/4/258</link>
	<description>In recent years, deep learning has been increasingly applied to medical image analysis. One such application is automated polyp detection in colonoscopy, aimed at reducing miss rates. This study presents a comprehensive evaluation of nine state-of-the-art object detection models for colonic polyp detection: YOLOv8, YOLOv9, YOLOv10, YOLO11, YOLO12, YOLO26, RT-DETR, YOLO-World, and YOLOE. The models were evaluated on three publicly available datasets: CVC-ClinicDB, CVC-ColonDB, and ETIS-LaribPolypDB. All models were trained under standardized conditions using identical hyperparameters and data augmentation strategies to ensure a fair comparison. Performance was evaluated using multiple metrics: mAP@50, mAP@50–95, F1 score, precision, recall, inference time, and computational cost. YOLO11 demonstrated the best overall performance, achieving mAP@50 scores of 0.995, 0.944, and 0.978 on the three datasets, respectively, while maintaining the fastest inference time of approximately 150 ms per image and the third-lowest computational cost at 21.3 GFLOPs. Cross-dataset generalization experiments revealed a significant loss of performance, with mAP@50 dropping by 20–40% when models were tested on an unseen dataset, highlighting the challenge of true generalization with limited datasets. Statistical analysis by polyp size showed that while all models achieved F1 scores exceeding 0.95 for large polyps, performance decreased to 0.60–0.85 for small polyps, indicating a limitation in detecting small lesions. The analysis of failure modes showed that missed detections, false positives, and boundary errors constitute 60–75% of all failures, suggesting that domain adaptation of object detection models may be required.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 258: Colonic Polyp Detection with Object Detection Models</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/258">doi: 10.3390/computers15040258</a></p>
	<p>Authors:
		Raluca Portase
		Eugen-Richard Ardelean
		</p>
	<p>In recent years, deep learning has been increasingly applied to medical image analysis. One such application is automated polyp detection in colonoscopy, aimed at reducing miss rates. This study presents a comprehensive evaluation of nine state-of-the-art object detection models for colonic polyp detection: YOLOv8, YOLOv9, YOLOv10, YOLO11, YOLO12, YOLO26, RT-DETR, YOLO-World, and YOLOE. The models were evaluated on three publicly available datasets: CVC-ClinicDB, CVC-ColonDB, and ETIS-LaribPolypDB. All models were trained under standardized conditions using identical hyperparameters and data augmentation strategies to ensure a fair comparison. Performance was evaluated using multiple metrics: mAP@50, mAP@50–95, F1 score, precision, recall, inference time, and computational cost. YOLO11 demonstrated the best overall performance, achieving mAP@50 scores of 0.995, 0.944, and 0.978 on the three datasets, respectively, while maintaining the fastest inference time of approximately 150 ms per image and the third-lowest computational cost at 21.3 GFLOPs. Cross-dataset generalization experiments revealed a significant loss of performance, with mAP@50 dropping by 20–40% when models were tested on an unseen dataset, highlighting the challenge of true generalization with limited datasets. Statistical analysis by polyp size showed that while all models achieved F1 scores exceeding 0.95 for large polyps, performance decreased to 0.60–0.85 for small polyps, indicating a limitation in detecting small lesions. The analysis of failure modes showed that missed detections, false positives, and boundary errors constitute 60–75% of all failures, suggesting that domain adaptation of object detection models may be required.</p>
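	<p>A minimal sketch of the intersection-over-union test underlying the mAP@50 metric used above: a predicted box counts as a true positive when its IoU with a ground-truth box reaches 0.5. Box coordinates are invented for illustration.</p>
	<pre><code>
# Boxes are (x1, y1, x2, y2) corner coordinates.
def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

pred, gt = (10, 10, 50, 50), (20, 20, 60, 60)   # hypothetical boxes
score = iou(pred, gt)
print(f"IoU={score:.3f}  counted as TP at mAP@50: {score >= 0.5}")
</code></pre>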
	]]></content:encoded>

	<dc:title>Colonic Polyp Detection with Object Detection Models</dc:title>
			<dc:creator>Raluca Portase</dc:creator>
			<dc:creator>Eugen-Richard Ardelean</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040258</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>258</prism:startingPage>
		<prism:doi>10.3390/computers15040258</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/258</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/257">

	<title>Computers, Vol. 15, Pages 257: Operationalising Teaching Presence at Scale: A Design Model for Foundational Cybersecurity Education</title>
	<link>https://www.mdpi.com/2073-431X/15/4/257</link>
	<description>Online cybersecurity education increasingly serves diverse cohorts, including students with non-technical backgrounds and those balancing their studies with work or family responsibilities. Yet, research on sustaining educational quality while scaling fully online enrolments remains limited, particularly in foundational technical subjects where learning requires both conceptual understanding and professional judgement. This study aims to examine how teaching presence can be operationalised in fully online foundational cybersecurity subjects through inspectable artefacts and routines that remain workable for large cohorts and distributed teaching teams. This paper reports a Scholarship of Teaching and Learning (SoTL) design and transfer case grounded in the Community of Inquiry (CoI) framework. This study examines the redesign of CSE1ICB (Introduction to Cybersecurity) and the transfer of the same design logic to CSE1CPR (Cybersecurity in Practice). The findings identify a coherent four-component design model comprising (1) real-world incident integration, (2) scenario-based learning and interactive checks, (3) structured, layered support, and (4) a predictable communication rhythm across the learning management system (LMS) and email. Across these two subjects, these elements are presented as an integrated system intended to make learning objectives salient, increase opportunities for guided practice in professional reasoning, reduce avoidable friction in practical work, and create consistent instructor visibility through routine communication and support structures. This paper synthesises the approach into nine transferable design principles, mapped to CoI teaching presence dimensions and illustrated through concrete design choices, including incident-framing templates, scenario prompt patterns, layered support resources, formative feedback patterns, and communication routines. Overall, this study shows that teaching presence can be operationalised as a coordinated design system rather than as a set of isolated tactics. This paper contributes a reusable and theory-informed model for educators coordinating foundational cybersecurity subjects delivered online at scale.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 257: Operationalising Teaching Presence at Scale: A Design Model for Foundational Cybersecurity Education</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/257">doi: 10.3390/computers15040257</a></p>
	<p>Authors:
		Ahmad Salehi Shahraki
		Hooman Alavizadeh
		</p>
	<p>Online cybersecurity education increasingly serves diverse cohorts, including students with non-technical backgrounds and those balancing their studies with work or family responsibilities. Yet, research on sustaining educational quality while scaling fully online enrolments remains limited, particularly in foundational technical subjects where learning requires both conceptual understanding and professional judgement. This study aims to examine how teaching presence can be operationalised in fully online foundational cybersecurity subjects through inspectable artefacts and routines that remain workable for large cohorts and distributed teaching teams. This paper reports a Scholarship of Teaching and Learning (SoTL) design and transfer case grounded in the Community of Inquiry (CoI) framework. This study examines the redesign of CSE1ICB (Introduction to Cybersecurity) and the transfer of the same design logic to CSE1CPR (Cybersecurity in Practice). The findings identify a coherent four-component design model comprising (1) real-world incident integration, (2) scenario-based learning and interactive checks, (3) structured, layered support, and (4) a predictable communication rhythm across the learning management system (LMS) and email. Across these two subjects, these elements are presented as an integrated system intended to make learning objectives salient, increase opportunities for guided practice in professional reasoning, reduce avoidable friction in practical work, and create consistent instructor visibility through routine communication and support structures. This paper synthesises the approach into nine transferable design principles, mapped to CoI teaching presence dimensions and illustrated through concrete design choices, including incident-framing templates, scenario prompt patterns, layered support resources, formative feedback patterns, and communication routines. Overall, this study shows that teaching presence can be operationalised as a coordinated design system rather than as a set of isolated tactics. This paper contributes a reusable and theory-informed model for educators coordinating foundational cybersecurity subjects delivered online at scale.</p>
	]]></content:encoded>

	<dc:title>Operationalising Teaching Presence at Scale: A Design Model for Foundational Cybersecurity Education</dc:title>
			<dc:creator>Ahmad Salehi Shahraki</dc:creator>
			<dc:creator>Hooman Alavizadeh</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040257</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>257</prism:startingPage>
		<prism:doi>10.3390/computers15040257</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/257</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/256">

	<title>Computers, Vol. 15, Pages 256: Novel Ensemble Models for Enhanced Accuracy in Time Series Classification: Application to Multimodal Emotion Detection</title>
	<link>https://www.mdpi.com/2073-431X/15/4/256</link>
	<description>Emotions are fundamental to the human experience and are increasingly analyzed in applications such as marketing, healthcare, and human–computer interaction. Many recent approaches to human emotion recognition rely on deep learning, which typically demands large labeled datasets and substantial computational resources and often suffers from limited interpretability. Applying classical machine-learning methods to sensor time series is more lightweight but may struggle to reach high accuracy, especially when the temporal structure is not explicitly modelled. This paper introduces three subinterval voting-based ensemble models designed for user-specific emotion classification from multimodal time-series data acquired by smartwatch inertial sensors and heart-rate measurements. Each model partitions a time window into subwindows and performs window-level voting, thereby exploiting the temporal consistency of emotional responses while remaining compatible with standard classifiers such as logistic regression and Random Forests (with or without hyperparameter tuning). The models are evaluated on a public smartwatch emotion benchmark dataset under both binary (happy vs. sad) and three-class (happy, sad, neutral) settings. For the binary task, the relative accuracy improvement over the corresponding baseline reported in prior work ranges from 4.68% to 26.05%, with a mean gain of 12.34%. For the three-class tasks, improvements range from 11.17% to 37.10%, with a mean gain of 21.63%. Within the evaluated experimental setting, these results show that the proposed subinterval ensembles consistently enhance performance while remaining model-agnostic and compatible with standard user-specific classification pipelines in sensor-based emotion recognition.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 256: Novel Ensemble Models for Enhanced Accuracy in Time Series Classification: Application to Multimodal Emotion Detection</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/256">doi: 10.3390/computers15040256</a></p>
	<p>Authors:
		Mohamed Hanafy Abdel-Kader Mahmoud
		Sherine Nagy Saleh
		Amin Shoukry
		Yousry Elgamal
		</p>
	<p>Emotions are fundamental to the human experience and are increasingly analyzed in applications such as marketing, healthcare, and human–computer interaction. Many recent approaches to human emotion recognition rely on deep learning, which typically demands large labeled datasets and substantial computational resources and often suffers from limited interpretability. Applying classical machine-learning methods to sensor time series is more lightweight but may struggle to reach high accuracy, especially when the temporal structure is not explicitly modelled. This paper introduces three subinterval voting-based ensemble models designed for user-specific emotion classification from multimodal time-series data acquired by smartwatch inertial sensors and heart-rate measurements. Each model partitions a time window into subwindows and performs window-level voting, thereby exploiting the temporal consistency of emotional responses while remaining compatible with standard classifiers such as logistic regression and Random Forests (with or without hyperparameter tuning). The models are evaluated on a public smartwatch emotion benchmark dataset under both binary (happy vs. sad) and three-class (happy, sad, neutral) settings. For the binary task, the relative accuracy improvement over the corresponding baseline reported in prior work ranges from 4.68% to 26.05%, with a mean gain of 12.34%. For the three-class tasks, improvements range from 11.17% to 37.10%, with a mean gain of 21.63%. Within the evaluated experimental setting, these results show that the proposed subinterval ensembles consistently enhance performance while remaining model-agnostic and compatible with standard user-specific classification pipelines in sensor-based emotion recognition.</p>
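	<p>A minimal sketch of the subinterval voting idea: split a sensor window into subwindows, classify each with a base model, and take the majority vote as the window label. The toy threshold classifier stands in for the logistic regression or Random Forest bases; it is not the paper's model.</p>
	<pre><code>
import numpy as np

def window_vote(window, n_sub, classify):
    # Partition the window, label each subwindow, return the majority label.
    subwindows = np.array_split(window, n_sub)
    votes = [classify(sw) for sw in subwindows]
    values, counts = np.unique(votes, return_counts=True)
    return values[np.argmax(counts)]

# Toy base classifier: label a subwindow by its mean heart-rate sample.
classify = lambda sw: "happy" if sw.mean() > 80 else "sad"
window = np.array([78, 84, 90, 88, 75, 95, 82, 70, 86])   # synthetic samples
print(window_vote(window, n_sub=3, classify=classify))
</code></pre>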
	]]></content:encoded>

	<dc:title>Novel Ensemble Models for Enhanced Accuracy in Time Series Classification: Application to Multimodal Emotion Detection</dc:title>
			<dc:creator>Mohamed Hanafy Abdel-Kader Mahmoud</dc:creator>
			<dc:creator>Sherine Nagy Saleh</dc:creator>
			<dc:creator>Amin Shoukry</dc:creator>
			<dc:creator>Yousry Elgamal</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040256</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>256</prism:startingPage>
		<prism:doi>10.3390/computers15040256</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/256</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/255">

	<title>Computers, Vol. 15, Pages 255: Machine Learning and Geographic Information Systems for Aircraft Route Analysis in Large-Scale Airport Transportation Networks</title>
	<link>https://www.mdpi.com/2073-431X/15/4/255</link>
	<description>This study proposes a scalable, AI-driven, and Geographic Information System (GIS)-integrated framework for intelligent route-level classification in large-scale airport transportation networks to support airport operations, logistics planning, and network-level decision-making. The framework addresses the need for practical artificial intelligence applications that combine spatial network analysis with supervised machine learning to improve route assessment and resource allocation in complex air transport systems. A structured dataset was developed using operational and traffic-related attributes, including route distance, aircraft capacity, weekly frequency, annual passenger volume, demand variability, and route performance indicators, with additional normalized features to improve data representation. A Gradient Boosting ensemble classifier was trained to categorize routes into high-, medium-, and low-priority classes. The model achieved strong predictive performance, with a test-set area under the ROC curve of 0.961, an accuracy of 0.922, an F1-score of 0.915, a precision of 0.918, and a recall of 0.922. Feature importance analysis identified demand variability and route-density indicators as the main drivers of classification, enhancing interpretability and practical trust. The proposed framework demonstrates the real-world potential of AI for scalable, explainable, and efficient decision support in airport logistics and transportation network management.</description>
	<pubDate>2026-04-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 255: Machine Learning and Geographic Information Systems for Aircraft Route Analysis in Large-Scale Airport Transportation Networks</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/255">doi: 10.3390/computers15040255</a></p>
	<p>Authors:
		Saadi Turied Kurdi
		Luttfi A. Al-Haddad
		Zeashan Hameed Khan
		</p>
	<p>This study proposes a scalable, AI-driven, and Geographic Information System (GIS)-integrated framework for intelligent route-level classification in large-scale airport transportation networks to support airport operations, logistics planning, and network-level decision-making. The framework addresses the need for practical artificial intelligence applications that combine spatial network analysis with supervised machine learning to improve route assessment and resource allocation in complex air transport systems. A structured dataset was developed using operational and traffic-related attributes, including route distance, aircraft capacity, weekly frequency, annual passenger volume, demand variability, and route performance indicators, with additional normalized features to improve data representation. A Gradient Boosting ensemble classifier was trained to categorize routes into high-, medium-, and low-priority classes. The model achieved strong predictive performance, with a test-set area under the ROC curve of 0.961, an accuracy of 0.922, an F1-score of 0.915, a precision of 0.918, and a recall of 0.922. Feature importance analysis identified demand variability and route-density indicators as the main drivers of classification, enhancing interpretability and practical trust. The proposed framework demonstrates the real-world potential of AI for scalable, explainable, and efficient decision support in airport logistics and transportation network management.</p>
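	<p>A minimal sketch, assuming scikit-learn, of a gradient-boosted route classifier with a feature-importance readout of the kind described above; the feature names and data are hypothetical placeholders, not the study's dataset.</p>
	<pre><code>
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

rng = np.random.default_rng(0)
features = ["distance", "capacity", "weekly_freq", "pax_volume", "demand_var"]
X = rng.random((200, len(features)))    # synthetic route attributes
y = rng.integers(0, 3, 200)             # 0=low, 1=medium, 2=high priority

model = GradientBoostingClassifier(n_estimators=100, random_state=0).fit(X, y)

# Rank features by the importance the fitted ensemble assigns them.
for name, imp in sorted(zip(features, model.feature_importances_),
                        key=lambda t: -t[1]):
    print(f"{name}: {imp:.3f}")
</code></pre>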
	]]></content:encoded>

	<dc:title>Machine Learning and Geographic Information Systems for Aircraft Route Analysis in Large-Scale Airport Transportation Networks</dc:title>
			<dc:creator>Saadi Turied Kurdi</dc:creator>
			<dc:creator>Luttfi A. Al-Haddad</dc:creator>
			<dc:creator>Zeashan Hameed Khan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040255</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-18</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-18</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>255</prism:startingPage>
		<prism:doi>10.3390/computers15040255</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/255</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/254">

	<title>Computers, Vol. 15, Pages 254: Performance Degradation of Object Detection Neural Networks Under Natural Visual Contamination in Autonomous Driving</title>
	<link>https://www.mdpi.com/2073-431X/15/4/254</link>
	<description>The operation of driver assistance systems and autonomous vehicles requires a sensor system and a control algorithm. Sensors provide information to detect people, vehicles and objects in the vehicle’s environment; however, their performance can be degraded by adverse environmental conditions and contamination. A literature review identified factors that reduce sensor visibility, such as adverse weather conditions and external contamination. In this study, the detection efficiency of state-of-the-art neural network-based object detectors was examined in a simulation environment using a synthetic dataset. A custom dataset comprising six urban and suburban traffic scenarios was created, including clean images and ten contaminated variants per scene with increasing mud coverage. The results show that contamination leads to a measurable reduction in detection performance across all models. Smaller variants are more sensitive to degradation, while medium-complexity models provide a favorable balance between robustness and computational cost. Increasing model size yields limited additional robustness, and performance differences between architectures highlight the importance of model design. Furthermore, the spatial distribution of contamination, particularly near the image center, has a significant impact on performance in addition to its overall extent.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 254: Performance Degradation of Object Detection Neural Networks Under Natural Visual Contamination in Autonomous Driving</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/254">doi: 10.3390/computers15040254</a></p>
	<p>Authors:
		Dániel Csikor
		János Hollósi
		</p>
	<p>The operation of driver assistance systems and autonomous vehicles requires a sensor system and a control algorithm. Sensors provide information to detect people, vehicles and objects in the vehicle’s environment; however, their performance can be degraded by adverse environmental conditions and contamination. A literature review identified factors that reduce sensor visibility, such as adverse weather conditions and external contamination. In this study, the detection efficiency of state-of-the-art neural network-based object detectors was examined in a simulation environment using a synthetic dataset. A custom dataset comprising six urban and suburban traffic scenarios was created, including clean images and ten contaminated variants per scene with increasing mud coverage. The results show that contamination leads to a measurable reduction in detection performance across all models. Smaller variants are more sensitive to degradation, while medium-complexity models provide a favorable balance between robustness and computational cost. Increasing model size yields limited additional robustness, and performance differences between architectures highlight the importance of model design. Furthermore, the spatial distribution of contamination, particularly near the image center, has a significant impact on performance in addition to its overall extent.</p>
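	<p>A small sketch of quantifying the two contamination factors the study highlights, overall coverage and coverage near the image center, from a binary occlusion mask; the random mask stands in for a rendered mud layer.</p>
	<pre><code>
import numpy as np

def coverage_stats(mask, center_frac=0.5):
    # Fraction of occluded pixels overall and within a central crop.
    h, w = mask.shape
    overall = mask.mean()
    ch, cw = int(h * center_frac), int(w * center_frac)
    y0, x0 = (h - ch) // 2, (w - cw) // 2
    central = mask[y0:y0 + ch, x0:x0 + cw].mean()
    return overall, central

mask = np.random.default_rng(0).random((240, 320)) > 0.8   # 1 = occluded pixel
overall, central = coverage_stats(mask)
print(f"overall={overall:.2%}  central={central:.2%}")
</code></pre>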
	]]></content:encoded>

	<dc:title>Performance Degradation of Object Detection Neural Networks Under Natural Visual Contamination in Autonomous Driving</dc:title>
			<dc:creator>Dániel Csikor</dc:creator>
			<dc:creator>János Hollósi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040254</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>254</prism:startingPage>
		<prism:doi>10.3390/computers15040254</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/254</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/253">

	<title>Computers, Vol. 15, Pages 253: Robust Iris Segmentation with Deep CNNs for Detecting Fully or Nearly Closed Eyes in Non-Ideal Biometric Systems</title>
	<link>https://www.mdpi.com/2073-431X/15/4/253</link>
	<description>This study proposes a robust hybrid framework for iris segmentation in covert biometric systems, specifically addressing the challenge of non-ideal images featuring fully or nearly closed eyes. To overcome the limitations of traditional geometric methods, this study implements a SqueezeNet-based Deep Convolutional Neural Network (DCNN) for rapid eye-state classification. Comparative analysis with various pretrained DCNN models indicates that SqueezeNet provides an optimal balance of accuracy and efficiency, requiring only 1.24 million parameters and a minimal memory footprint of 5.2 MB. For iris contour demarcation, the proposed algorithm combines the Circular Hough Transform (CHT) with global gray-level statistics and anatomical constraints to facilitate reliable iris localization. Utilizing image decimation, percentile-based thresholding, and Canny edge detection, it systematically delineates the limbic and pupillary boundaries. This improved search methodology ensures precise contour delineation, even under sub-optimal imaging conditions. The proposed algorithm was validated on a novel dataset encompassing challenging conditions such as specular reflections, blur, non-uniform illumination, and varying degrees of occlusion, including nearly or fully closed eyes. Experimental results demonstrate superior segmentation accuracy and significant computational efficiency, underscoring the model’s potential for real-time biometric applications in unconstrained environments.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 253: Robust Iris Segmentation with Deep CNNs for Detecting Fully or Nearly Closed Eyes in Non-Ideal Biometric Systems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/253">doi: 10.3390/computers15040253</a></p>
	<p>Authors:
		Farmanullah Jan
		</p>
	<p>This study proposes a robust hybrid framework for iris segmentation in covert biometric systems, specifically addressing the challenge of non-ideal images featuring fully or nearly closed eyes. To overcome the limitations of traditional geometric methods, this study implements a SqueezeNet-based Deep Convolutional Neural Network (DCNN) for rapid eye-state classification. Comparative analysis with various pretrained DCNN models indicates that SqueezeNet provides an optimal balance of accuracy and efficiency, requiring only 1.24 million parameters and a minimal memory footprint of 5.2 MB. For iris contour demarcation, the proposed algorithm combines the Circular Hough Transform (CHT) with global gray-level statistics and anatomical constraints to facilitate reliable iris localization. Utilizing image decimation, percentile-based thresholding, and Canny edge detection, it systematically delineates the limbic and pupillary boundaries. This improved search methodology ensures precise contour delineation, even under sub-optimal imaging conditions. The proposed algorithm was validated on a novel dataset encompassing challenging conditions such as specular reflections, blur, non-uniform illumination, and varying degrees of occlusion, including nearly or fully closed eyes. Experimental results demonstrate superior segmentation accuracy and significant computational efficiency, underscoring the model’s potential for real-time biometric applications in unconstrained environments.</p>
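	<p>A minimal sketch, assuming OpenCV, of the classical building blocks named above: median filtering, Canny edge detection, and a Circular Hough Transform to localize a circular iris boundary candidate. All parameter values and the input file are illustrative, not the paper's tuned settings.</p>
	<pre><code>
import cv2
import numpy as np

gray = cv2.imread("eye.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input image
blur = cv2.medianBlur(gray, 5)                       # suppress specular noise
edges = cv2.Canny(blur, 50, 120)                     # edge map for inspection

# Circular Hough Transform; parameters are illustrative only.
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=60,
                           param1=120, param2=30, minRadius=20, maxRadius=80)
if circles is not None:
    x, y, r = np.round(circles[0, 0]).astype(int)
    print(f"limbic boundary candidate: center=({x}, {y}), radius={r}")
</code></pre>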
	]]></content:encoded>

	<dc:title>Robust Iris Segmentation with Deep CNNs for Detecting Fully or Nearly Closed Eyes in Non-Ideal Biometric Systems</dc:title>
			<dc:creator>Farmanullah Jan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040253</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>253</prism:startingPage>
		<prism:doi>10.3390/computers15040253</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/253</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/252">

	<title>Computers, Vol. 15, Pages 252: AI-Driven CRM Architecture for Managing Large-Scale Fragrance Sample Requests and Understanding Customer Preferences on Social Media</title>
	<link>https://www.mdpi.com/2073-431X/15/4/252</link>
	<description>Social media platforms have become critical infrastructures for customer relationship management (CRM), requiring scalable and intelligent solutions to handle high-volume interactions. In the luxury fragrance sector, digital promotion poses a unique challenge because olfactory attributes cannot be experienced online. As a result, physical fragrance samples remain essential, generating large volumes of sample requests or inquiries across social media. However, many requests remain unmanaged due to limitations in manual CRM (i.e., human-driven processes), revealing a design gap that may negatively affect perceived responsiveness and service quality. This study uses qualitative content analysis with NVivo 12 to examine large-scale sample request interactions on the Facebook pages of four luxury fragrance brands. Data were collected via NCapture and analyzed to identify recurring patterns, linguistic structures, and customer expressions related to sample requests. Findings confirm frequent repetitive requests, highlighting inefficiencies in traditional CRM systems under high demand. This research proposes an AI-driven CRM Sample Request Management Architecture (CRM–SRMA) that systematically captures and processes customer sample requests, collects the necessary mailing information, and seamlessly transfers validated data to the final dispatching stage. The proposed system also models individual fragrance preferences by analyzing customers’ interactions with samples, particularly in terms of top, middle, and base notes. By leveraging this information, the architecture enables the targeted promotion of new fragrance releases that closely align with customers’ demonstrated olfactory preferences. The insights of this research provide a scalable, intelligent mechanism that enables luxury social media managers and CRM systems to manage high-volume interactions while maintaining service quality. By automating sample request processing, the mechanism improves responsiveness and reduces operational burden. It also supports long-term relationship building through preference tracking and updating customers about relevant new fragrance releases. Although focused on fragrances, the mechanism is adaptable to other luxury cosmetic categories, thereby enhancing overall social media-based customer service.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 252: AI-Driven CRM Architecture for Managing Large-Scale Fragrance Sample Requests and Understanding Customer Preferences on Social Media</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/252">doi: 10.3390/computers15040252</a></p>
	<p>Authors:
		Ali Aldhamiri
		</p>
	<p>Social media platforms have become critical infrastructures for customer relationship management (CRM), requiring scalable and intelligent solutions to handle high-volume interactions. In the luxury fragrance sector, digital promotion poses a unique challenge because olfactory attributes cannot be experienced online. As a result, physical fragrance samples remain essential, generating large volumes of sample requests or inquiries across social media. However, many requests remain unmanaged due to limitations in manual CRM (i.e., human-driven processes), revealing a design gap that may negatively affect perceived responsiveness and service quality. This study uses qualitative content analysis with NVivo 12 to examine large-scale sample request interactions on the Facebook pages of four luxury fragrance brands. Data were collected via NCapture and analyzed to identify recurring patterns, linguistic structures, and customer expressions related to sample requests. Findings confirm frequent repetitive requests, highlighting inefficiencies in traditional CRM systems under high demand. This research proposes an AI-driven CRM Sample Request Management Architecture (CRM–SRMA) that systematically captures and processes customer sample requests, collects the necessary mailing information, and seamlessly transfers validated data to the final dispatching stage. The proposed system also models individual fragrance preferences by analyzing customers’ interactions with samples, particularly in terms of top, middle, and base notes. By leveraging this information, the architecture enables the targeted promotion of new fragrance releases that closely align with customers’ demonstrated olfactory preferences. The insights of this research provide a scalable, intelligent mechanism that enables luxury social media managers and CRM systems to manage high-volume interactions while maintaining service quality. By automating sample request processing, the mechanism improves responsiveness and reduces operational burden. It also supports long-term relationship building through preference tracking and updating customers about relevant new fragrance releases. Although focused on fragrances, the mechanism is adaptable to other luxury cosmetic categories, thereby enhancing overall social media-based customer service.</p>
	]]></content:encoded>

	<dc:title>AI-Driven CRM Architecture for Managing Large-Scale Fragrance Sample Requests and Understanding Customer Preferences on Social Media</dc:title>
			<dc:creator>Ali Aldhamiri</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040252</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>252</prism:startingPage>
		<prism:doi>10.3390/computers15040252</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/252</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/251">

	<title>Computers, Vol. 15, Pages 251: The Effects of Computer-Assisted Writing on Written Language Production in Students with Specific Learning Difficulties: Implications for Sustainable Digital Education</title>
	<link>https://www.mdpi.com/2073-431X/15/4/251</link>
	<description>This study investigated the effects of computer-assisted writing on the written language production of secondary school students with Specific Learning Difficulties (SLD), particularly dyslexia. Writing is a complex cognitive process requiring the coordination of spelling, lexical retrieval, syntactic organization, transcription, and revision, areas in which students with SLD often experience persistent difficulties. The study compared handwritten and computer-based texts produced by 40 students with SLD and 20 students without learning difficulties using a counterbalanced design, with an interval of approximately two weeks between the two writing sessions. In the handwriting condition, students used printed reference materials, whereas in the computer-based condition they had access to general-purpose digital tools, including spell-checkers, electronic dictionaries, online resources, and word-processing software. Written texts were evaluated using the Spelling Accuracy Index and holistic scores assigned by independent raters. Data were analyzed using descriptive statistics and non-parametric tests (Mann–Whitney U and Wilcoxon signed-rank tests). The findings revealed statistically significant improvements in favor of computer-based writing for both groups, with particularly strong gains among students with SLD. Computer-written texts demonstrated higher spelling accuracy and received higher evaluation scores, indicating improved performance in the assessed writing outcomes. The findings suggest that computer-assisted writing may support written language production in secondary school students with SLD, particularly in relation to spelling accuracy and overall text evaluation, and may offer a useful avenue for more inclusive writing instruction.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 251: The Effects of Computer-Assisted Writing on Written Language Production in Students with Specific Learning Difficulties: Implications for Sustainable Digital Education</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/251">doi: 10.3390/computers15040251</a></p>
	<p>Authors:
		Georgios Polydoros
		Ilias Vasileiou
		Zoe Krokou
		Alexandros-Stamatios Antoniou
		</p>
	<p>This study investigated the effects of computer-assisted writing on the written language production of secondary school students with Specific Learning Difficulties (SLD), particularly dyslexia. Writing is a complex cognitive process requiring the coordination of spelling, lexical retrieval, syntactic organization, transcription, and revision, areas in which students with SLD often experience persistent difficulties. The study compared handwritten and computer-based texts produced by 40 students with SLD and 20 students without learning difficulties using a counterbalanced design, with an interval of approximately two weeks between the two writing sessions. In the handwriting condition, students used printed reference materials, whereas in the computer-based condition they had access to general-purpose digital tools, including spell-checkers, electronic dictionaries, online resources, and word-processing software. Written texts were evaluated using the Spelling Accuracy Index and holistic scores assigned by independent raters. Data were analyzed using descriptive statistics and non-parametric tests (Mann&ndash;Whitney U and Wilcoxon signed-rank tests). The findings revealed statistically significant improvements in favor of computer-based writing for both groups, with particularly strong gains among students with SLD. Computer-written texts demonstrated higher spelling accuracy and received higher evaluation scores, indicating improved performance in the assessed writing outcomes. The findings suggest that computer-assisted writing may support written language production in secondary school students with SLD, particularly in relation to spelling accuracy and overall text evaluation, and may offer a useful avenue for more inclusive writing instruction.</p>
	]]></content:encoded>

	<dc:title>The Effects of Computer-Assisted Writing on Written Language Production in Students with Specific Learning Difficulties: Implications for Sustainable Digital Education</dc:title>
			<dc:creator>Georgios Polydoros</dc:creator>
			<dc:creator>Ilias Vasileiou</dc:creator>
			<dc:creator>Zoe Krokou</dc:creator>
			<dc:creator>Alexandros-Stamatios Antoniou</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040251</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>251</prism:startingPage>
		<prism:doi>10.3390/computers15040251</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/251</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/250">

	<title>Computers, Vol. 15, Pages 250: How Influencer Attractiveness and Expertise Shape Consumer Responses Through Parasocial Interaction and Trust</title>
	<link>https://www.mdpi.com/2073-431X/15/4/250</link>
	<description>Influencer marketing research has shown that source-related evaluations matter, yet less is known about how specific influencer cues are translated into consumer responses through differentiated internal psychological states. Drawing on the Stimulus&amp;ndash;Organism&amp;ndash;Response (S-O-R) framework, this study examines how influencer attractiveness and expertise shape consumer responses through parasocial interaction and trust. Attractiveness is conceptualized as a social-affective cue, whereas expertise is conceptualized as a competence-based cue. Parasocial interaction is modeled as a relational organismic state, and trust is modeled as a reliance-oriented organismic state. Survey data were collected from 532 Taiwanese social media users with prior experience following influencers and analyzed using partial least squares structural equation modeling (PLS-SEM). The results show that attractiveness positively predicts parasocial interaction, expertise positively predicts trust, and parasocial interaction further contributes to trust. Trust, in turn, positively influences loyalty, purchase intention, and recommendation intention, with the strongest effect observed for recommendation intention. These findings suggest that influencer effectiveness is better understood as a differentiated cue&amp;ndash;mechanism&amp;ndash;response process rather than as a generalized source-evaluation effect. By distinguishing attractiveness from expertise and by modeling parasocial interaction and trust as conceptually distinct but sequentially connected organismic states, this study provides a more precise S-O-R account of how influencer evaluations are translated into relational, transactional, and advocacy-oriented consumer responses.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 250: How Influencer Attractiveness and Expertise Shape Consumer Responses Through Parasocial Interaction and Trust</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/250">doi: 10.3390/computers15040250</a></p>
	<p>Authors:
		Ming-Hsuan Wu
		</p>
	<p>Influencer marketing research has shown that source-related evaluations matter, yet less is known about how specific influencer cues are translated into consumer responses through differentiated internal psychological states. Drawing on the Stimulus&ndash;Organism&ndash;Response (S-O-R) framework, this study examines how influencer attractiveness and expertise shape consumer responses through parasocial interaction and trust. Attractiveness is conceptualized as a social-affective cue, whereas expertise is conceptualized as a competence-based cue. Parasocial interaction is modeled as a relational organismic state, and trust is modeled as a reliance-oriented organismic state. Survey data were collected from 532 Taiwanese social media users with prior experience following influencers and analyzed using partial least squares structural equation modeling (PLS-SEM). The results show that attractiveness positively predicts parasocial interaction, expertise positively predicts trust, and parasocial interaction further contributes to trust. Trust, in turn, positively influences loyalty, purchase intention, and recommendation intention, with the strongest effect observed for recommendation intention. These findings suggest that influencer effectiveness is better understood as a differentiated cue&ndash;mechanism&ndash;response process rather than as a generalized source-evaluation effect. By distinguishing attractiveness from expertise and by modeling parasocial interaction and trust as conceptually distinct but sequentially connected organismic states, this study provides a more precise S-O-R account of how influencer evaluations are translated into relational, transactional, and advocacy-oriented consumer responses.</p>
	]]></content:encoded>

	<dc:title>How Influencer Attractiveness and Expertise Shape Consumer Responses Through Parasocial Interaction and Trust</dc:title>
			<dc:creator>Ming-Hsuan Wu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040250</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>250</prism:startingPage>
		<prism:doi>10.3390/computers15040250</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/250</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/249">

	<title>Computers, Vol. 15, Pages 249: Occasion-Based Clothing Classification Using Vision Transformer and Traditional Machine Learning Models</title>
	<link>https://www.mdpi.com/2073-431X/15/4/249</link>
	<description>Clothing classification by occasion is an important area in computer vision and artificial intelligence (AI). This task is particularly challenging because of the subtle visual similarities among clothing categories such as formal, party, and casual attire. Variations in color, fabric, patterns, and lighting further increase the complexity of this task. To address this challenge, we used the Fashionpedia dataset to create a balanced subset of 15,000 images. Specifically, we adopted two different methods for labeling these images: automated classification, which relies on category identifications (IDs) and components, and manual labeling performed by human annotators. We then implemented our preprocessing pipeline, which includes several steps: resizing, image normalization, background removal using segmentation masks, and class balancing. We benchmarked traditional models, including artificial neural networks (ANNs), support vector machines (SVMs), and k-nearest neighbors (KNNs), which use histogram of oriented gradients (HOG) features, as well as deep learning models such as convolutional neural networks (CNNs), the Visual Geometry Group 16 (VGG16) model utilizing transfer learning, and the vision transformer (ViT) model, all evaluated using identical data splits and preprocessing procedures. The traditional models achieved moderate accuracy, ranging from 54% to 66%. In contrast, the ViT model achieved an accuracy of 81.78% with automated classification and 98.09% with manual labeling. This indicates that higher label accuracy, along with the preprocessing steps used, significantly enhances performance. Together, these factors improve the effectiveness of ViT in context-aware apparel classification and establish a reliable baseline for future research.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 249: Occasion-Based Clothing Classification Using Vision Transformer and Traditional Machine Learning Models</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/249">doi: 10.3390/computers15040249</a></p>
	<p>Authors:
		Hanaa Alzahrani
		Maram Almotairi
		Arwa Basbrain
		</p>
	<p>Clothing classification by occasion is an important area in computer vision and artificial intelligence (AI). This task is particularly challenging because of the subtle visual similarities among clothing categories such as formal, party, and casual attire. Variations in color, fabric, patterns, and lighting further increase the complexity of this task. To address this challenge, we used the Fashionpedia dataset to create a balanced subset of 15,000 images. Specifically, we adopted two different methods for labeling these images: automated classification, which relies on category identifications (IDs) and components, and manual labeling performed by human annotators. We then implemented our preprocessing pipeline, which includes several steps: resizing, image normalization, background removal using segmentation masks, and class balancing. We benchmarked traditional models, including artificial neural networks (ANNs), support vector machines (SVMs), and k-nearest neighbors (KNNs), which use histogram of oriented gradients (HOG) features, as well as deep learning models such as convolutional neural networks (CNNs), the Visual Geometry Group 16 (VGG16) model utilizing transfer learning, and the vision transformer (ViT) model, all evaluated using identical data splits and preprocessing procedures. The traditional models achieved moderate accuracy, ranging from 54% to 66%. In contrast, the ViT model achieved an accuracy of 81.78% with automated classification and 98.09% with manual labeling. This indicates that higher label accuracy, along with the preprocessing steps used, significantly enhances performance. Together, these factors improve the effectiveness of ViT in context-aware apparel classification and establish a reliable baseline for future research.</p>
	]]></content:encoded>

	<dc:title>Occasion-Based Clothing Classification Using Vision Transformer and Traditional Machine Learning Models</dc:title>
			<dc:creator>Hanaa Alzahrani</dc:creator>
			<dc:creator>Maram Almotairi</dc:creator>
			<dc:creator>Arwa Basbrain</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040249</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>249</prism:startingPage>
		<prism:doi>10.3390/computers15040249</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/249</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/248">

	<title>Computers, Vol. 15, Pages 248: Evaluating Machine Learning Classifiers in Detecting Cyberattacks</title>
	<link>https://www.mdpi.com/2073-431X/15/4/248</link>
	<description>This study aims to develop a machine learning model that can accurately detect cyberattacks. We compare the performance of Support Vector Machine (SVM), Logistic Regression (LR), and Random Forest (RF) in predicting cyberattacks. Furthermore, we investigate whether using Information Gain Attribute Evaluation (IGAE) for feature selection improves the performance of the algorithms. This work provides a clear comparison of the algorithms and identifies the most suitable one for classifying cyberattacks. In addition, this study combines LR and RF using a voting classifier along with IGAE and compares its performance with that of the rest of the algorithms. We investigate whether combining algorithms increases the accuracy of the results. The results show that the most accurate algorithm is RF, followed by LR and SVM. Contrary to initial expectations, the findings further indicate that the application of IGAE marginally reduces algorithm accuracy across the tested classifiers, suggesting that feature selection through information gain is not universally beneficial in cyberattack detection tasks. These findings contribute to the growing body of knowledge on effective machine learning methodologies for cybersecurity applications.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 248: Evaluating Machine Learning Classifiers in Detecting Cyberattacks</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/248">doi: 10.3390/computers15040248</a></p>
	<p>Authors:
		Mustafa Hammad
		Mohamed Almahmood
		Maen Hammad
		Bassam A. Y. Alqaralleh
		Aymen I. Zreikat
		</p>
	<p>This study aims to develop a machine learning model that can accurately detect cyberattacks. We compare the performance of Support Vector Machine (SVM), Logistic Regression (LR), and Random Forest (RF) in predicting cyberattacks. Furthermore, we investigate whether using Information Gain Attribute Evaluation (IGAE) for feature selection improves the performance of the algorithms. This work provides a clear comparison of the algorithms and identifies the most suitable one for classifying cyberattacks. In addition, this study combines LR and RF using a voting classifier along with IGAE and compares its performance with that of the rest of the algorithms. We investigate whether combining algorithms increases the accuracy of the results. The results show that the most accurate algorithm is RF, followed by LR and SVM. Contrary to initial expectations, the findings further indicate that the application of IGAE marginally reduces algorithm accuracy across the tested classifiers, suggesting that feature selection through information gain is not universally beneficial in cyberattack detection tasks. These findings contribute to the growing body of knowledge on effective machine learning methodologies for cybersecurity applications.</p>
	]]></content:encoded>

	<dc:title>Evaluating Machine Learning Classifiers in Detecting Cyberattacks</dc:title>
			<dc:creator>Mustafa Hammad</dc:creator>
			<dc:creator>Mohamed Almahmood</dc:creator>
			<dc:creator>Maen Hammad</dc:creator>
			<dc:creator>Bassam A. Y. Alqaralleh</dc:creator>
			<dc:creator>Aymen I. Zreikat</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040248</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>248</prism:startingPage>
		<prism:doi>10.3390/computers15040248</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/248</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/247">

	<title>Computers, Vol. 15, Pages 247: An Energy-Aware Security Framework for the Internet of Things Integrating Blockchain and Edge Intelligence</title>
	<link>https://www.mdpi.com/2073-431X/15/4/247</link>
	<description>Large-scale smart city Internet of Things (IoT) infrastructures must simultaneously provide strong cybersecurity protection, real-time anomaly detection, and energy-efficient operation despite the strict resource limitations of sensing devices. The current body of research typically addresses secure data management, edge intelligence, or energy optimization in isolation, leaving a practical gap in unified frameworks that jointly optimize these objectives. This paper proposes a co-designed energy-aware cybersecurity framework that integrates lightweight secure sensing, hybrid edge-based anomaly detection, Practical Byzantine Fault Tolerance (PBFT)-enabled blockchain integrity, and Grey Wolf Optimization (GWO)-driven edge deployment within a single end-to-end architecture. The practical contribution of the proposed framework lies in enabling tamper-evident trusted sensing, real-time detection of both data and energy anomalies, and communication-efficient operation suitable for scalable smart city deployments. The simulation results demonstrate that the proposed method achieves strong operational efficiency, reaching up to 234.6 transactions per second while maintaining end-to-end latency of approximately 140&amp;ndash;194 ms and reducing total energy consumption to about 1.68 J under high-load conditions. In addition, the hybrid anomaly detection mechanism achieves an F1-score of 0.985 and ROC-AUC of 0.992, confirming strong detection capability under realistic sensing and attack scenarios.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 247: An Energy-Aware Security Framework for the Internet of Things Integrating Blockchain and Edge Intelligence</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/247">doi: 10.3390/computers15040247</a></p>
	<p>Authors:
		Seyed Salar Sefati
		Razvan Craciunescu
		Bahman Arasteh
		</p>
	<p>Large-scale smart city Internet of Things (IoT) infrastructures must simultaneously provide strong cybersecurity protection, real-time anomaly detection, and energy-efficient operation despite the strict resource limitations of sensing devices. The current body of research typically addresses secure data management, edge intelligence, or energy optimization in isolation, leaving a practical gap in unified frameworks that jointly optimize these objectives. This paper proposes a co-designed energy-aware cybersecurity framework that integrates lightweight secure sensing, hybrid edge-based anomaly detection, Practical Byzantine Fault Tolerance (PBFT)-enabled blockchain integrity, and Grey Wolf Optimization (GWO)-driven edge deployment within a single end-to-end architecture. The practical contribution of the proposed framework lies in enabling tamper-evident trusted sensing, real-time detection of both data and energy anomalies, and communication-efficient operation suitable for scalable smart city deployments. The simulation results demonstrate that the proposed method achieves strong operational efficiency, reaching up to 234.6 transactions per second while maintaining end-to-end latency of approximately 140&ndash;194 ms and reducing total energy consumption to about 1.68 J under high-load conditions. In addition, the hybrid anomaly detection mechanism achieves an F1-score of 0.985 and ROC-AUC of 0.992, confirming strong detection capability under realistic sensing and attack scenarios.</p>
	]]></content:encoded>

	<dc:title>An Energy-Aware Security Framework for the Internet of Things Integrating Blockchain and Edge Intelligence</dc:title>
			<dc:creator>Seyed Salar Sefati</dc:creator>
			<dc:creator>Razvan Craciunescu</dc:creator>
			<dc:creator>Bahman Arasteh</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040247</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>247</prism:startingPage>
		<prism:doi>10.3390/computers15040247</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/247</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/246">

	<title>Computers, Vol. 15, Pages 246: Audio Signal Authentication and Integrity Protection</title>
	<link>https://www.mdpi.com/2073-431X/15/4/246</link>
	<description>This paper presents SAIN (Signal Authentication and INtegrity), an improvement to an algorithm developed for the fragile watermarking of audio files (sequences of samples) with the purpose of integrity protection. First, the original method based on cryptographic hash functions is made secure through the use of Message Authentication Codes (MACs). Second, the use of MACs also reduces the quantity of security data, decreasing the number of original least significant bits that may be modified by the embedding of the watermark and therefore improving quality. Finally, the localization ability is improved by making the block size an input parameter of the algorithm.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 246: Audio Signal Authentication and Integrity Protection</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/246">doi: 10.3390/computers15040246</a></p>
	<p>Authors:
		Marco Botta
		Davide Cavagnino
		Annunziata Marra
		</p>
	<p>This paper presents SAIN (Signal Authentication and INtegrity), an improvement to an algorithm developed for the fragile watermarking of audio files (sequences of samples) with the purpose of integrity protection. First, the original method based on cryptographic hash functions is made secure through the use of Message Authentication Codes (MACs). Second, the use of MACs also reduces the quantity of security data, decreasing the number of original least significant bits that may be modified by the embedding of the watermark and therefore improving quality. Finally, the localization ability is improved by making the block size an input parameter of the algorithm.</p>
	]]></content:encoded>

	<dc:title>Audio Signal Authentication and Integrity Protection</dc:title>
			<dc:creator>Marco Botta</dc:creator>
			<dc:creator>Davide Cavagnino</dc:creator>
			<dc:creator>Annunziata Marra</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040246</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>246</prism:startingPage>
		<prism:doi>10.3390/computers15040246</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/246</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/245">

	<title>Computers, Vol. 15, Pages 245: Improving Financial Literacy Among Portuguese Youth: A Multicriteria Decision Analysis Using the Analytic Hierarchy Process</title>
	<link>https://www.mdpi.com/2073-431X/15/4/245</link>
	<description>Financial literacy is critical for individual well-being and sustainable economic development, yet significant gaps remain among Portuguese young adults. Using a two-phase design, this study combines a diagnostic assessment and multi-criteria decision analysis to identify and prioritise effective financial education strategies. In Phase 1, a diagnostic questionnaire administered to 172 first-year university students revealed pronounced deficiencies in core financial concepts. Only 29.1% correctly answered a question on compound interest, and almost half were unable to understand the concept of inflation. Additionally, 62.8% reported low exposure to financial education during compulsory schooling, and 59.9% strongly agreed that it should be included in the mandatory curriculum, indicating both unmet need and strong receptiveness. Phase 2 employed the Analytic Hierarchy Process (AHP) to evaluate five educational alternatives across four criteria. Engagement and motivation (0.32) and knowledge acquisition (0.31) were prioritised over behavioural impact (0.22) and accessibility (0.15). Based on expert assessments weighted by student preferences, in-person courses emerged as the most effective strategy (0.42), substantially outperforming online courses (0.22), videos and digital content (0.14), books (0.13), and games (0.10). The findings point to the need for policy-driven integration of structured, educator-led financial education within formal curricula, supported by approaches that prioritise active engagement and knowledge acquisition over convenience, with digital tools serving as complements rather than replacements.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 245: Improving Financial Literacy Among Portuguese Youth: A Multicriteria Decision Analysis Using the Analytic Hierarchy Process</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/245">doi: 10.3390/computers15040245</a></p>
	<p>Authors:
		Manuel Reis
		Tiago Miguel
		Paula Sarabando
		Rogério Matias
		</p>
	<p>Financial literacy is critical for individual well-being and sustainable economic development, yet significant gaps remain among Portuguese young adults. Using a two-phase design, this study combines a diagnostic assessment and multi-criteria decision analysis to identify and prioritise effective financial education strategies. In Phase 1, a diagnostic questionnaire administered to 172 first-year university students revealed pronounced deficiencies in core financial concepts. Only 29.1% correctly answered a question on compound interest, and almost half were unable to understand the concept of inflation. Additionally, 62.8% reported low exposure to financial education during compulsory schooling, and 59.9% strongly agreed that it should be included in the mandatory curriculum, indicating both unmet need and strong receptiveness. Phase 2 employed the Analytic Hierarchy Process (AHP) to evaluate five educational alternatives across four criteria. Engagement and motivation (0.32) and knowledge acquisition (0.31) were prioritised over behavioural impact (0.22) and accessibility (0.15). Based on expert assessments weighted by student preferences, in-person courses emerged as the most effective strategy (0.42), substantially outperforming online courses (0.22), videos and digital content (0.14), books (0.13), and games (0.10). The findings point to the need for policy-driven integration of structured, educator-led financial education within formal curricula, supported by approaches that prioritise active engagement and knowledge acquisition over convenience, with digital tools serving as complements rather than replacements.</p>
	]]></content:encoded>

	<dc:title>Improving Financial Literacy Among Portuguese Youth: A Multicriteria Decision Analysis Using the Analytic Hierarchy Process</dc:title>
			<dc:creator>Manuel Reis</dc:creator>
			<dc:creator>Tiago Miguel</dc:creator>
			<dc:creator>Paula Sarabando</dc:creator>
			<dc:creator>Rogério Matias</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040245</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>245</prism:startingPage>
		<prism:doi>10.3390/computers15040245</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/245</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/244">

	<title>Computers, Vol. 15, Pages 244: A Calibrated Multi-Task Ensemble Architecture for Biomedical Risk Prediction</title>
	<link>https://www.mdpi.com/2073-431X/15/4/244</link>
	<description>Risk stratification of impaired glycemic control remains a major challenge in biomedical data analysis due to heterogeneous metabolic, behavioral, and therapeutic factors observed in large-scale populations. This study proposes a calibrated and interpretable decision-support framework, termed Calibrated Multi-Task Stacking Ensemble (CMSE), for joint modeling of clinically related glycemic outcomes. The framework integrates demographic variables, lipid profiles, renal and inflammatory biomarkers, dietary and smoking indicators, and therapy-related features within a unified predictive architecture. Robust modeling is ensured through leakage-aware preprocessing, quantile-based Winsorization, out-of-fold stacking, and isotonic calibration of probabilistic outputs. The physiological coherence between short-term and long-term glycemic markers is investigated using an explicit intertask coupling mechanism based on the estimated average glucose (eAG) ratio. Model interpretability is supported using SHAP analysis, mutual information, distance correlation, and feature importance metrics. In the primary medication-free screening configuration, the framework is evaluated on the NHANES 2017&amp;ndash;March 2020 dataset, achieving ROC-AUC of 0.865 for diabetes classification and R2 values of 0.385 and 0.366 for plasma glucose and HbA1c prediction, respectively. These results indicate that CMSE provides a reliable and explainable approach for calibrated glycemic risk assessment and clinical decision support.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 244: A Calibrated Multi-Task Ensemble Architecture for Biomedical Risk Prediction</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/244">doi: 10.3390/computers15040244</a></p>
	<p>Authors:
		Zhainagul Khamitova
		Gulmira Omarova
		Madi Akhmetzhanov
		Roza Burganova
		Maksym Orynbassar
		Umida Sabirova
		Almagul Bukatayeva
		Aliya Barakova
		Gulnoz Jiyanmuratova
		Dilchekhra Yuldasheva
		</p>
	<p>Risk stratification of impaired glycemic control remains a major challenge in biomedical data analysis due to heterogeneous metabolic, behavioral, and therapeutic factors observed in large-scale populations. This study proposes a calibrated and interpretable decision-support framework, termed Calibrated Multi-Task Stacking Ensemble (CMSE), for joint modeling of clinically related glycemic outcomes. The framework integrates demographic variables, lipid profiles, renal and inflammatory biomarkers, dietary and smoking indicators, and therapy-related features within a unified predictive architecture. Robust modeling is ensured through leakage-aware preprocessing, quantile-based Winsorization, out-of-fold stacking, and isotonic calibration of probabilistic outputs. The physiological coherence between short-term and long-term glycemic markers is investigated using an explicit intertask coupling mechanism based on the estimated average glucose (eAG) ratio. Model interpretability is supported using SHAP analysis, mutual information, distance correlation, and feature importance metrics. In the primary medication-free screening configuration, the framework is evaluated on the NHANES 2017&ndash;March 2020 dataset, achieving ROC-AUC of 0.865 for diabetes classification and R2 values of 0.385 and 0.366 for plasma glucose and HbA1c prediction, respectively. These results indicate that CMSE provides a reliable and explainable approach for calibrated glycemic risk assessment and clinical decision support.</p>
	]]></content:encoded>

	<dc:title>A Calibrated Multi-Task Ensemble Architecture for Biomedical Risk Prediction</dc:title>
			<dc:creator>Zhainagul Khamitova</dc:creator>
			<dc:creator>Gulmira Omarova</dc:creator>
			<dc:creator>Madi Akhmetzhanov</dc:creator>
			<dc:creator>Roza Burganova</dc:creator>
			<dc:creator>Maksym Orynbassar</dc:creator>
			<dc:creator>Umida Sabirova</dc:creator>
			<dc:creator>Almagul Bukatayeva</dc:creator>
			<dc:creator>Aliya Barakova</dc:creator>
			<dc:creator>Gulnoz Jiyanmuratova</dc:creator>
			<dc:creator>Dilchekhra Yuldasheva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040244</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>244</prism:startingPage>
		<prism:doi>10.3390/computers15040244</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/244</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/243">

	<title>Computers, Vol. 15, Pages 243: The Art Nouveau Path: Requirements Engineering and Traceability for City-Scale In-the-Wild Mobile Augmented Reality Learning Services</title>
	<link>https://www.mdpi.com/2073-431X/15/4/243</link>
	<description>City-scale augmented reality (AR) learning paths are outdoor, multi-stop educational routes delivered through mobile devices in public space. This paper examines the Art Nouveau Path, a mobile AR game (MARG) route in Aveiro, Portugal, as a deployable learning service. The focus is on implementation requirements and traceability rather than learning outcomes. The analysis combined profiling of eight points of interest (POIs) and 36 tasks, group-session logs from 118 sessions, and teacher-facing evidence from a validation workshop (T1-VAL, N = 30) and on-site observation (T2-OBS, N = 24). Open-text responses were segmented into meaning units and coded with an eight-determinant taxonomy, with good intercoder reliability (Krippendorff&amp;rsquo;s alpha = 0.83). Logs and the post-path questionnaire (S2-POST, N = 439) were used only to describe enactment feasibility and data integrity. The strongest determinants concerned onboarding and legibility, marker robustness and recovery, and curriculum alignment, together with safety and fallback needs. These signals were translated into 18 testable requirements linked to six transfer artefacts for enactment, maintenance, incident handling, and fallback. Overall, the study provides an implementation-oriented specification to support auditability, replication, and transfer in city-scale AR learning services.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 243: The Art Nouveau Path: Requirements Engineering and Traceability for City-Scale In-the-Wild Mobile Augmented Reality Learning Services</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/243">doi: 10.3390/computers15040243</a></p>
	<p>Authors:
		João Ferreira-Santos
		Lúcia Pombo
		</p>
	<p>City-scale augmented reality (AR) learning paths are outdoor, multi-stop educational routes delivered through mobile devices in public space. This paper examines the Art Nouveau Path, a mobile AR game (MARG) route in Aveiro, Portugal, as a deployable learning service. The focus is on implementation requirements and traceability rather than learning outcomes. The analysis combined profiling of eight points of interest (POIs) and 36 tasks, group-session logs from 118 sessions, and teacher-facing evidence from a validation workshop (T1-VAL, N = 30) and on-site observation (T2-OBS, N = 24). Open-text responses were segmented into meaning units and coded with an eight-determinant taxonomy, with good intercoder reliability (Krippendorff&rsquo;s alpha = 0.83). Logs and the post-path questionnaire (S2-POST, N = 439) were used only to describe enactment feasibility and data integrity. The strongest determinants concerned onboarding and legibility, marker robustness and recovery, and curriculum alignment, together with safety and fallback needs. These signals were translated into 18 testable requirements linked to six transfer artefacts for enactment, maintenance, incident handling, and fallback. Overall, the study provides an implementation-oriented specification to support auditability, replication, and transfer in city-scale AR learning services.</p>
	]]></content:encoded>

	<dc:title>The Art Nouveau Path: Requirements Engineering and Traceability for City-Scale In-the-Wild Mobile Augmented Reality Learning Services</dc:title>
			<dc:creator>João Ferreira-Santos</dc:creator>
			<dc:creator>Lúcia Pombo</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040243</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>243</prism:startingPage>
		<prism:doi>10.3390/computers15040243</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/243</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/242">

	<title>Computers, Vol. 15, Pages 242: From Gamification to Student Achievement: A Longitudinal Conceptual Structure Analysis of a Research Field</title>
	<link>https://www.mdpi.com/2073-431X/15/4/242</link>
	<description>Gamification has become an innovative method used to enrich instructional designs, moving far beyond its initial motivational purposes. Given its promising impact on the contemporary educational context, gamification has attracted numerous studies seeking ways to enhance the quality of instruction and learning outcomes across diverse fields. Despite the growing volume of gamification research, longitudinal analyses tracing how the conceptual priorities of the field have shifted across historically distinct periods remain scarce. This study addresses that gap by mapping the thematic evolution of gamification research through science mapping analysis of articles published in peer-reviewed journals across three time periods: 2006&amp;ndash;2019, 2020&amp;ndash;2022, and 2023&amp;ndash;2025. The findings indicate that early research focused primarily on gamification&amp;rsquo;s impact on student motivation and engagement, while subsequent research focused on course evaluation and digital learning environments. Recent research has focused more on blended and domain-specific methods, testing the adaptability of different applications to specific fields. These findings suggest that gamification research has evolved from more superficial practices, such as encouraging course participation or boosting motivation, to its integration into personalized learning environments, and in recent years, towards ethical data use and the development of context-specific applications.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 242: From Gamification to Student Achievement: A Longitudinal Conceptual Structure Analysis of a Research Field</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/242">doi: 10.3390/computers15040242</a></p>
	<p>Authors:
		Stamatios Papadakis
		Turgut Karaköse
		Ramazan Yirci
		Tijen Tülübaş
		Irem Demir
		</p>
	<p>Gamification has become an innovative method used to enrich instructional designs, moving far beyond its initial motivational purposes. Given its promising impact on the contemporary educational context, gamification has attracted numerous studies seeking ways to enhance the quality of instruction and learning outcomes across diverse fields. Despite the growing volume of gamification research, longitudinal analyses tracing how the conceptual priorities of the field have shifted across historically distinct periods remain scarce. This study addresses that gap by mapping the thematic evolution of gamification research through science mapping analysis of articles published in peer-reviewed journals across three time periods: 2006&ndash;2019, 2020&ndash;2022, and 2023&ndash;2025. The findings indicate that early research focused primarily on gamification&rsquo;s impact on student motivation and engagement, while subsequent research focused on course evaluation and digital learning environments. Recent research has focused more on blended and domain-specific methods, testing the adaptability of different applications to specific fields. These findings suggest that gamification research has evolved from more superficial practices, such as encouraging course participation or boosting motivation, to its integration into personalized learning environments, and in recent years, towards ethical data use and the development of context-specific applications.</p>
	]]></content:encoded>

	<dc:title>From Gamification to Student Achievement: A Longitudinal Conceptual Structure Analysis of a Research Field</dc:title>
			<dc:creator>Stamatios Papadakis</dc:creator>
			<dc:creator>Turgut Karaköse</dc:creator>
			<dc:creator>Ramazan Yirci</dc:creator>
			<dc:creator>Tijen Tülübaş</dc:creator>
			<dc:creator>Irem Demir</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040242</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>242</prism:startingPage>
		<prism:doi>10.3390/computers15040242</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/242</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/241">

	<title>Computers, Vol. 15, Pages 241: Edge Computing Approach to AI-Based Gesture for Human&amp;ndash;Robot Interaction and Control</title>
	<link>https://www.mdpi.com/2073-431X/15/4/241</link>
	<description>This paper presents an edge-deployable vision-based framework for human&amp;ndash;robot interaction using an xArm collaborative robot, a single RGB camera mounted on the robot wrist, and lightweight AI-based perception modules. The system enables intuitive, contact-free control by combining hand understanding and object detection within a unified perception&amp;ndash;decision&amp;ndash;control pipeline. Hand landmarks are extracted using MediaPipe Hands, from which continuous hand trajectories, static gestures, and dynamic gestures are derived. Task objects are detected using a YOLO-based model, and both hand and object observations are mapped into the robot workspace using ArUco-based planar calibration. To ensure stable robot motion, the hand control signal is smoothed using low-pass and Kalman filtering, while dynamic gestures such as waving are recognized using a lightweight LSTM classifier. The complete pipeline runs locally on edge hardware, specifically NVIDIA Jetson Orin Nano and Raspberry Pi 5 with a Hailo AI accelerator. Experimental evaluation includes trajectory stability, gesture recognition reliability, and runtime performance on both platforms. Results show that filtering significantly reduces hand-tracking jitter, gesture recognition provides stable command states for control, and both edge devices support real-time operation, with Jetson achieving consistently lower runtime than Raspberry Pi. The proposed system demonstrates the feasibility of low-cost edge AI solutions for responsive and practical human&amp;ndash;robot interaction in collaborative industrial environments.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 241: Edge Computing Approach to AI-Based Gesture for Human&amp;ndash;Robot Interaction and Control</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/241">doi: 10.3390/computers15040241</a></p>
	<p>Authors:
		Nikola Ivačko
		Ivan Ćirić
		Miloš Simonović
		</p>
	<p>This paper presents an edge-deployable vision-based framework for human&ndash;robot interaction using an xArm collaborative robot, a single RGB camera mounted on the robot wrist, and lightweight AI-based perception modules. The system enables intuitive, contact-free control by combining hand understanding and object detection within a unified perception&ndash;decision&ndash;control pipeline. Hand landmarks are extracted using MediaPipe Hands, from which continuous hand trajectories, static gestures, and dynamic gestures are derived. Task objects are detected using a YOLO-based model, and both hand and object observations are mapped into the robot workspace using ArUco-based planar calibration. To ensure stable robot motion, the hand control signal is smoothed using low-pass and Kalman filtering, while dynamic gestures such as waving are recognized using a lightweight LSTM classifier. The complete pipeline runs locally on edge hardware, specifically NVIDIA Jetson Orin Nano and Raspberry Pi 5 with a Hailo AI accelerator. Experimental evaluation includes trajectory stability, gesture recognition reliability, and runtime performance on both platforms. Results show that filtering significantly reduces hand-tracking jitter, gesture recognition provides stable command states for control, and both edge devices support real-time operation, with Jetson achieving consistently lower runtime than Raspberry Pi. The proposed system demonstrates the feasibility of low-cost edge AI solutions for responsive and practical human&ndash;robot interaction in collaborative industrial environments.</p>
	]]></content:encoded>

	<dc:title>Edge Computing Approach to AI-Based Gesture for Human&amp;ndash;Robot Interaction and Control</dc:title>
			<dc:creator>Nikola Ivačko</dc:creator>
			<dc:creator>Ivan Ćirić</dc:creator>
			<dc:creator>Miloš Simonović</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040241</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>241</prism:startingPage>
		<prism:doi>10.3390/computers15040241</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/241</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/240">

	<title>Computers, Vol. 15, Pages 240: Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</title>
	<link>https://www.mdpi.com/2073-431X/15/4/240</link>
	<description>Quantum computing challenges the long-term security assumptions of blockchain systems that rely on classical public-key cryptography, motivating the adoption of post-quantum cryptography and quantum key distribution (QKD). This review maps research fronts at the intersection of blockchain and quantum-safe security, linking threat assumptions to post-quantum mechanisms, blockchain layers, and QKD positioning. Records were retrieved from Scopus and Web of Science using a two-block query and filtered through a PRISMA-guided workflow for bibliometric mapping. The final corpus comprises 648 journal articles and shows accelerated publication growth after 2023, with scientific production concentrated in a small set of leading countries. Keyword structures indicate that IoT-centric deployments dominate the semantic backbone, where authentication and intelligent methods co-occur with blockchain security primitives, while post-quantum and privacy-preserving constructs form a cohesive technical stream. QKD appears as a distinct but more specialized theme, typically discussed at the system level and shaped by infrastructure and scalability constraints. Overall, the literature is moving from conceptual risk articulation toward engineering integration; however, progress is limited by inconsistent reporting of threat models, post-quantum parameter sets, and ledger-level cost trade-offs, highlighting the need for auditable and reproducible evaluation.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 240: Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/240">doi: 10.3390/computers15040240</a></p>
	<p>Authors:
		Félix Díaz
		Nhell Cerna
		Rafael Liza
		Bryan Motta
		</p>
	<p>Quantum computing challenges the long-term security assumptions of blockchain systems that rely on classical public-key cryptography, motivating the adoption of post-quantum cryptography and quantum key distribution (QKD). This review maps research fronts at the intersection of blockchain and quantum-safe security, linking threat assumptions to post-quantum mechanisms, blockchain layers, and QKD positioning. Records were retrieved from Scopus and Web of Science using a two-block query and filtered through a PRISMA-guided workflow for bibliometric mapping. The final corpus comprises 648 journal articles and shows accelerated publication growth after 2023, with scientific production concentrated in a small set of leading countries. Keyword structures indicate that IoT-centric deployments dominate the semantic backbone, where authentication and intelligent methods co-occur with blockchain security primitives, while post-quantum and privacy-preserving constructs form a cohesive technical stream. QKD appears as a distinct but more specialized theme, typically discussed at the system level and shaped by infrastructure and scalability constraints. Overall, the literature is moving from conceptual risk articulation toward engineering integration; however, progress is limited by inconsistent reporting of threat models, post-quantum parameter sets, and ledger-level cost trade-offs, highlighting the need for auditable and reproducible evaluation.</p>
	]]></content:encoded>

	<dc:title>Quantum-Safe Blockchain: Mapping Research Fronts in Post-Quantum Cryptography, Quantum Threat Models, and QKD Integration</dc:title>
			<dc:creator>Félix Díaz</dc:creator>
			<dc:creator>Nhell Cerna</dc:creator>
			<dc:creator>Rafael Liza</dc:creator>
			<dc:creator>Bryan Motta</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040240</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>240</prism:startingPage>
		<prism:doi>10.3390/computers15040240</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/240</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/239">

	<title>Computers, Vol. 15, Pages 239: A Blockchain-Based Model for Managing Infectious Disease Data</title>
	<link>https://www.mdpi.com/2073-431X/15/4/239</link>
	<description>Infectious disease outbreaks continue to pose a significant threat to global health, underscoring the importance of timely detection and reliable reporting for effective interventions. Traditional reporting systems often rely on hierarchical data flows, which introduce delays, inconsistencies, and vulnerabilities, as highlighted during the COVID-19 pandemic. Blockchain, a disruptive technology, offers a promising solution. This study proposes a blockchain-based infectious disease reporting system built on Hyperledger Fabric that supports multi-level reporting and governance across national health systems. The architecture preserves hierarchical structures while enabling real-time reporting across authorized health stakeholders. It separates public test results from sensitive patient information, with private data secured via Private Data Collections and anchored using cryptographic hashes. Smart contracts enforce role-based access and validation, ensuring data integrity and controlled oversight. The system prototype was deployed within Docker containers and evaluated using illustrative COVID-19 case data. Network performance was benchmarked using Hyperledger Caliper, measuring throughput, latency, and resource utilization. The results demonstrate proper system functioning and stable transaction processing under the tested experimental conditions, supporting the feasibility of the proposed architecture for privacy-preserving multi-level infectious disease reporting systems.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 239: A Blockchain-Based Model for Managing Infectious Disease Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/239">doi: 10.3390/computers15040239</a></p>
	<p>Authors:
		Touria Jdid
		Mohammed Benbrahim
		Mohammed Nabil Kabbaj
		Adil Najdi
		</p>
	<p>Infectious disease outbreaks continue to pose a significant threat to global health, underscoring the importance of timely detection and reliable reporting for effective interventions. Traditional reporting systems often rely on hierarchical data flows, which introduce delays, inconsistencies, and vulnerabilities, as highlighted during the COVID-19 pandemic. Blockchain, a disruptive technology, offers a promising solution. This study proposes a blockchain-based infectious disease reporting system built on Hyperledger Fabric that supports multi-level reporting and governance across national health systems. The architecture preserves hierarchical structures while enabling real-time reporting across authorized health stakeholders. It separates public test results from sensitive patient information, with private data secured via Private Data Collections and anchored using cryptographic hashes. Smart contracts enforce role-based access and validation, ensuring data integrity and controlled oversight. The system prototype was deployed within Docker containers and evaluated using illustrative COVID-19 case data. Network performance was benchmarked using Hyperledger Caliper, measuring throughput, latency, and resource utilization. The results demonstrate proper system functioning and stable transaction processing under the tested experimental conditions, supporting the feasibility of the proposed architecture for privacy-preserving multi-level infectious disease reporting systems.</p>
	]]></content:encoded>

	<dc:title>A Blockchain-Based Model for Managing Infectious Disease Data</dc:title>
			<dc:creator>Touria Jdid</dc:creator>
			<dc:creator>Mohammed Benbrahim</dc:creator>
			<dc:creator>Mohammed Nabil Kabbaj</dc:creator>
			<dc:creator>Adil Najdi</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040239</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>239</prism:startingPage>
		<prism:doi>10.3390/computers15040239</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/239</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/238">

	<title>Computers, Vol. 15, Pages 238: Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/238</link>
	<description>Generative no-code development tools enable users to create applications directly from natural-language prompts, shifting interface design from manual construction to AI-mediated generation. However, identical prompts frequently produce substantially different user interface (UI) outcomes across tools and even across repeated executions within the same tool. This paper presents a systematic literature review examining how generative no-code systems make design and aesthetic decisions with respect to layout structure, visual consistency, usability, accessibility, and reproducibility. Twenty peer-reviewed studies (2021–2025) were analyzed following a structured review protocol. Existing research predominantly evaluates usability and accessibility in isolation while providing limited insight into aesthetic coherence, design variability, and prompt-to-output stability. Across studies, generative tools exhibit implicit design priors and stochastic behavior that lead to inconsistent visual outcomes and partial misalignment with human-centered design principles. These findings indicate that generative no-code tools do not act as deterministic translators of user intent but instead introduce their own stylistic tendencies. The paper identifies critical evaluation gaps and outlines requirements for future systems, including reproducible generation, transparent design reasoning, and user-directed control, to support reliable and predictable interface development.</description>
	<pubDate>2026-04-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 238: Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/238">doi: 10.3390/computers15040238</a></p>
	<p>Authors:
		Gizem Irmak
		Qusay H. Mahmoud
		</p>
	<p>Generative no-code development tools enable users to create applications directly from natural-language prompts, shifting interface design from manual construction to AI-mediated generation. However, identical prompts frequently produce substantially different user interface (UI) outcomes across tools and even across repeated executions within the same tool. This paper presents a systematic literature review examining how generative no-code systems make design and aesthetic decisions with respect to layout structure, visual consistency, usability, accessibility, and reproducibility. Twenty peer-reviewed studies (2021–2025) were analyzed following a structured review protocol. Existing research predominantly evaluates usability and accessibility in isolation while providing limited insight into aesthetic coherence, design variability, and prompt-to-output stability. Across studies, generative tools exhibit implicit design priors and stochastic behavior that lead to inconsistent visual outcomes and partial misalignment with human-centered design principles. These findings indicate that generative no-code tools do not act as deterministic translators of user intent but instead introduce their own stylistic tendencies. The paper identifies critical evaluation gaps and outlines requirements for future systems, including reproducible generation, transparent design reasoning, and user-directed control, to support reliable and predictable interface development.</p>
	]]></content:encoded>

	<dc:title>Design Behaviour and Interface Consistency in Generative No-Code Tools: A Systematic Literature Review</dc:title>
			<dc:creator>Gizem Irmak</dc:creator>
			<dc:creator>Qusay H. Mahmoud</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040238</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>238</prism:startingPage>
		<prism:doi>10.3390/computers15040238</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/238</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/237">

	<title>Computers, Vol. 15, Pages 237: Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</title>
	<link>https://www.mdpi.com/2073-431X/15/4/237</link>
	<description>Guaranteeing equitable access to computational thinking (CT) remains a persistent challenge in computing education, particularly across socioeconomically diverse school contexts. Although prior research has demonstrated the effectiveness of block-based and physical computing environments, limited empirical evidence has examined whether structured instructional mediation can compensate for contextual disparities. This quasi-experimental pre–post study addresses this gap by analyzing CT development in three socioeconomically diverse primary schools in Chile (N=88, third grade), including private urban, public urban, and rural public institutions. Students engaged in scaffolded Scratch programming and Arduino simulation activities designed to explicitly support abstraction, sequencing, and debugging processes. These activities were framed within a broader STEAM learning approach, integrating computational thinking with problem-solving, experimentation, and interdisciplinary reasoning. Statistical analysis revealed significant differences in instructional time across contexts (F(2,85)=14.62, p&lt;0.001, η²=0.26), indicating structural disparities in pacing. However, no statistically significant differences were observed in CT gains (F(2,85)=0.31, p=0.74), suggesting that structured pedagogical scaffolding buffered contextual inequalities. These findings provide empirical evidence from a Latin American non-WEIRD context and advance the conceptualization of instructional mediation as a compensatory mechanism for equity in early computing education. This study contributes to digital equity research by demonstrating that instructional design quality may play a more decisive role than infrastructural availability in enabling computational thinking development for all learners.</description>
	<pubDate>2026-04-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 237: Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/237">doi: 10.3390/computers15040237</a></p>
	<p>Authors:
		Jesennia Cárdenas-Cobo
		Moyra Castro-Paredes
		Rodrigo Saens-Navarrete
		Claudia de la Fuente-Burdiles
		Cristian Vidal-Silva
		</p>
	<p>Guaranteeing equitable access to computational thinking (CT) remains a persistent challenge in computing education, particularly across socioeconomically diverse school contexts. Although prior research has demonstrated the effectiveness of block-based and physical computing environments, limited empirical evidence has examined whether structured instructional mediation can compensate for contextual disparities. This quasi-experimental pre–post study addresses this gap by analyzing CT development in three socioeconomically diverse primary schools in Chile (N=88, third grade), including private urban, public urban, and rural public institutions. Students engaged in scaffolded Scratch programming and Arduino simulation activities designed to explicitly support abstraction, sequencing, and debugging processes. These activities were framed within a broader STEAM learning approach, integrating computational thinking with problem-solving, experimentation, and interdisciplinary reasoning. Statistical analysis revealed significant differences in instructional time across contexts (F(2,85)=14.62, p&lt;0.001, η²=0.26), indicating structural disparities in pacing. However, no statistically significant differences were observed in CT gains (F(2,85)=0.31, p=0.74), suggesting that structured pedagogical scaffolding buffered contextual inequalities. These findings provide empirical evidence from a Latin American non-WEIRD context and advance the conceptualization of instructional mediation as a compensatory mechanism for equity in early computing education. This study contributes to digital equity research by demonstrating that instructional design quality may play a more decisive role than infrastructural availability in enabling computational thinking development for all learners.</p>
	]]></content:encoded>

	<dc:title>Instructional Mediation for Equitable Computational Thinking in STEAM Learning Across Diverse School Contexts</dc:title>
			<dc:creator>Jesennia Cárdenas-Cobo</dc:creator>
			<dc:creator>Moyra Castro-Paredes</dc:creator>
			<dc:creator>Rodrigo Saens-Navarrete</dc:creator>
			<dc:creator>Claudia de la Fuente-Burdiles</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040237</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-12</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-12</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>237</prism:startingPage>
		<prism:doi>10.3390/computers15040237</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/237</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/236">

	<title>Computers, Vol. 15, Pages 236: An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems: Unified Resilient Security Governance Model (URSGM)</title>
	<link>https://www.mdpi.com/2073-431X/15/4/236</link>
	<description>Hyperconnected IoT ecosystems have become crucial for organizational operations, yet existing governance structures remain fragmented, technology-centric, and ill-equipped to manage the risks, compliance pressures, and resilience needs of IoT. This paper presents an integrated, theory-based information security governance model tailored for IoT-driven organizations. A conceptual synthesis is performed by integrating five theoretical anchors: governance theory, socio-technical systems theory, risk governance theory, institutional/compliance theory, and resilience/adaptive capacity theory. These theoretical lenses are used to derive essential governance constructs and to develop a modular architecture tailored to IoT security needs. The model’s validity is grounded in theoretical integration rather than empirical testing, consistent with the nature of conceptual research. The integrated model provides six interdependent governance dimensions: strategic governance, operational governance, technical oversight, compliance alignment, risk governance, and resilience/adaptation, anchored by an ecosystem coordination layer. It offers structured decision rights, continuous risk monitoring, regulatory legitimacy, and native adaptive capabilities against dynamic cyber-physical threats. This research addresses a known gap in the literature on IoT governance by providing an integrated, theoretically validated governance model that systematically connects the rationale and operational mechanisms of governance for resilient, future-proof IoT adoption. The model is further operationalized through a five-level maturity structure, enabling organizations to assess and progressively enhance governance capabilities.</description>
	<pubDate>2026-04-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 236: An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems: Unified Resilient Security Governance Model (URSGM)</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/236">doi: 10.3390/computers15040236</a></p>
	<p>Authors:
		Hamed Taherdoost
		Chin-Shiuh Shieh
		Shashi Kant Gupta
		</p>
	<p>Hyperconnected IoT ecosystems have become crucial for organizational operations, yet existing governance structures remain fragmented, technology-centric, and ill-equipped to manage the risks, compliance pressures, and resilience needs of IoT. This paper presents an integrated, theory-based information security governance model tailored for IoT-driven organizations. A conceptual synthesis is performed by integrating five theoretical anchors: governance theory, socio-technical systems theory, risk governance theory, institutional/compliance theory, and resilience/adaptive capacity theory. These theoretical lenses are used to derive essential governance constructs and to develop a modular architecture tailored to IoT security needs. The model’s validity is grounded in theoretical integration rather than empirical testing, consistent with the nature of conceptual research. The integrated model provides six interdependent governance dimensions: strategic governance, operational governance, technical oversight, compliance alignment, risk governance, and resilience/adaptation, anchored by an ecosystem coordination layer. It offers structured decision rights, continuous risk monitoring, regulatory legitimacy, and native adaptive capabilities against dynamic cyber-physical threats. This research addresses a known gap in the literature on IoT governance by providing an integrated, theoretically validated governance model that systematically connects the rationale and operational mechanisms of governance for resilient, future-proof IoT adoption. The model is further operationalized through a five-level maturity structure, enabling organizations to assess and progressively enhance governance capabilities.</p>
	]]></content:encoded>

	<dc:title>An Integrated Information Security Governance Model for Hyperconnected IoT Ecosystems: Unified Resilient Security Governance Model (URSGM)</dc:title>
			<dc:creator>Hamed Taherdoost</dc:creator>
			<dc:creator>Chin-Shiuh Shieh</dc:creator>
			<dc:creator>Shashi Kant Gupta</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040236</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-10</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-10</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>236</prism:startingPage>
		<prism:doi>10.3390/computers15040236</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/236</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/235">

	<title>Computers, Vol. 15, Pages 235: Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/235</link>
	<description>Effective software engineering education today requires tools that adapt to individual learner proficiency and progress, while ensuring positive student engagement. Gamified platforms are an effective way to support learning and sustain motivation, but their efficacy depends on a robust underlying architecture. This systematic literature review analyzes state-of-the-art artificial intelligence (AI)-based adaptive architectures designed to support gamified learning tools, highlighting their architectural models (such as intelligent tutoring systems, multi-agent systems, and immersive virtual reality/augmented reality environments), adaptation mechanisms (including Generative AI and chatbots), and personalization strategies. A significant focus is placed on Process Mining and Learning Analytics as methodological approaches to organize learning paths and guide dynamic adaptation based on student behavior. The results of the selected studies demonstrate advantages such as increased engagement, longer-term participation, and a personalized learning pace. However, challenges remain, such as establishing common assessment criteria, integrating different technologies, and ensuring system scalability. The findings offer concrete insights for designing the next generation of effective gamified learning tools, based on data and software engineering processes.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 235: Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/235">doi: 10.3390/computers15040235</a></p>
	<p>Authors:
		Aurora Annamaria Quartulli
		Giovanni Mignogna
		Vera Zizzo
		Marina Mongiello
		</p>
	<p>Effective software engineering education today requires tools that adapt to individual learner proficiency and progress, while ensuring positive student engagement. Gamified platforms are an effective way to support learning and sustain motivation, but their efficacy depends on a robust underlying architecture. This systematic literature review analyzes state-of-the-art artificial intelligence (AI)-based adaptive architectures designed to support gamified learning tools, highlighting their architectural models (such as intelligent tutoring systems, multi-agent systems, and immersive virtual reality/augmented reality environments), adaptation mechanisms (including Generative AI and chatbots), and personalization strategies. A significant focus is placed on Process Mining and Learning Analytics as methodological approaches to organize learning paths and guide dynamic adaptation based on student behavior. The results of the selected studies demonstrate advantages such as increased engagement, longer-term participation, and a personalized learning pace. However, challenges remain, such as establishing common assessment criteria, integrating different technologies, and ensuring system scalability. The findings offer concrete insights for designing the next generation of effective gamified learning tools, based on data and software engineering processes.</p>
	]]></content:encoded>

	<dc:title>Adaptive Architectures for Gamified Learning in Software Engineering: A Systematic Review</dc:title>
			<dc:creator>Aurora Annamaria Quartulli</dc:creator>
			<dc:creator>Giovanni Mignogna</dc:creator>
			<dc:creator>Vera Zizzo</dc:creator>
			<dc:creator>Marina Mongiello</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040235</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>235</prism:startingPage>
		<prism:doi>10.3390/computers15040235</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/235</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/234">

	<title>Computers, Vol. 15, Pages 234: Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</title>
	<link>https://www.mdpi.com/2073-431X/15/4/234</link>
	<description>LiDAR point cloud semantic segmentation is essential for autonomous driving, yet LiDAR-only methods remain constrained by sparsity and limited texture cues. We propose Cross-Modal Collaborative Manifold Distillation (CMCMD), which transfers open-world semantic priors from the DINOv3 Vision Foundation Model to a LiDAR student network. The framework combines an Adaptive Relation Convolution (ARConv) backbone with geometry-conditioned aggregation, a Unified Bidirectional Mapping Module (UBMM) for explicit 2D–3D interaction, and Manifold-Aware Topological Distillation (MATD), which aligns inter-sample affinity structures in a shared latent manifold rather than enforcing pointwise feature matching. By preserving relational topology instead of absolute feature coordinates, CMCMD mitigates negative transfer across heterogeneous modalities. Experiments on SemanticKITTI and nuScenes yield mIoU values of 72.9% and 81.2%, respectively, surpassing the compared distillation baselines and approaching the performance of multimodal fusion methods at lower inference cost. Additional evaluation on real-world campus scenes further supports the cross-domain robustness of the proposed framework.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 234: Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/234">doi: 10.3390/computers15040234</a></p>
	<p>Authors:
		Yuchuan Yang
		Xiaosu Xu
		</p>
	<p>LiDAR point cloud semantic segmentation is essential for autonomous driving, yet LiDAR-only methods remain constrained by sparsity and limited texture cues. We propose Cross-Modal Collaborative Manifold Distillation (CMCMD), which transfers open-world semantic priors from the DINOv3 Vision Foundation Model to a LiDAR student network. The framework combines an Adaptive Relation Convolution (ARConv) backbone with geometry-conditioned aggregation, a Unified Bidirectional Mapping Module (UBMM) for explicit 2D–3D interaction, and Manifold-Aware Topological Distillation (MATD), which aligns inter-sample affinity structures in a shared latent manifold rather than enforcing pointwise feature matching. By preserving relational topology instead of absolute feature coordinates, CMCMD mitigates negative transfer across heterogeneous modalities. Experiments on SemanticKITTI and nuScenes yield mIoU values of 72.9% and 81.2%, respectively, surpassing the compared distillation baselines and approaching the performance of multimodal fusion methods at lower inference cost. Additional evaluation on real-world campus scenes further supports the cross-domain robustness of the proposed framework.</p>
	]]></content:encoded>

	<dc:title>Distilling Vision Foundation Models into LiDAR Networks via Manifold-Aware Topological Alignment</dc:title>
			<dc:creator>Yuchuan Yang</dc:creator>
			<dc:creator>Xiaosu Xu</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040234</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>234</prism:startingPage>
		<prism:doi>10.3390/computers15040234</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/234</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/233">

	<title>Computers, Vol. 15, Pages 233: Adversarial Robustness in Quantum Machine Learning: A Scoping Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/233</link>
	<description>Quantum machine learning (QML) is emerging as a promising paradigm at the intersection of quantum computing and artificial intelligence, yet its security under adversarial conditions remains insufficiently understood. This scoping review aims to systematically map empirical research on adversarial robustness in QML and to identify dominant threat models, defense strategies, evaluation approaches, practical constraints, and future research directions. Following PRISMA-ScR guidelines, four major databases were searched, resulting in 53 eligible empirical studies published between 2020 and 2026. The findings show that most research concentrates on input-level evasion attacks, particularly adversarial examples, and primarily evaluates robustness in classification-oriented models such as variational quantum circuits and quantum neural networks. Defense strategies are largely adapted from classical adversarial training and noise-based mitigation, with limited deployment on real quantum hardware. Robustness assessment is predominantly empirical, relying on accuracy degradation and attack success rate, while formal certification methods remain less common. The literature also highlights substantial constraints related to hardware limitations, NISQ noise, computational cost, and dataset scale. Overall, the evidence indicates that adversarial robustness research in QML is expanding but remains methodologically concentrated, underscoring the need for standardized benchmarking, scalable defenses, and hardware-validated robustness evaluation frameworks.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 233: Adversarial Robustness in Quantum Machine Learning: A Scoping Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/233">doi: 10.3390/computers15040233</a></p>
	<p>Authors:
		Yanche Ari Kustiawan
		Khairil Imran Ghauth
		</p>
	<p>Quantum machine learning (QML) is emerging as a promising paradigm at the intersection of quantum computing and artificial intelligence, yet its security under adversarial conditions remains insufficiently understood. This scoping review aims to systematically map empirical research on adversarial robustness in QML and to identify dominant threat models, defense strategies, evaluation approaches, practical constraints, and future research directions. Following PRISMA-ScR guidelines, four major databases were searched, resulting in 53 eligible empirical studies published between 2020 and 2026. The findings show that most research concentrates on input-level evasion attacks, particularly adversarial examples, and primarily evaluates robustness in classification-oriented models such as variational quantum circuits and quantum neural networks. Defense strategies are largely adapted from classical adversarial training and noise-based mitigation, with limited deployment on real quantum hardware. Robustness assessment is predominantly empirical, relying on accuracy degradation and attack success rate, while formal certification methods remain less common. The literature also highlights substantial constraints related to hardware limitations, NISQ noise, computational cost, and dataset scale. Overall, the evidence indicates that adversarial robustness research in QML is expanding but remains methodologically concentrated, underscoring the need for standardized benchmarking, scalable defenses, and hardware-validated robustness evaluation frameworks.</p>
	]]></content:encoded>

	<dc:title>Adversarial Robustness in Quantum Machine Learning: A Scoping Review</dc:title>
			<dc:creator>Yanche Ari Kustiawan</dc:creator>
			<dc:creator>Khairil Imran Ghauth</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040233</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>233</prism:startingPage>
		<prism:doi>10.3390/computers15040233</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/233</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/232">

	<title>Computers, Vol. 15, Pages 232: Autoencoders in Natural Language Processing: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/232</link>
	<description>Autoencoder-based models have become a fundamental component of unsupervised and self-supervised learning in natural language processing (NLP), enabling models to learn compact latent representations through input reconstruction. From early denoising autoencoders to probabilistic variational autoencoders (VAEs) and transformer-based masked autoencoding, reconstruction-driven objectives have played a significant role in shaping modern approaches to text representation and generation. This review provides a comprehensive analysis of the evolution of autoencoder architectures and training objectives in NLP, and synthesizes applications of VAEs across language modeling, controllable text generation, machine translation, sentiment modeling, and multilingual representation learning. Although previous surveys have examined deep generative models or representation learning in NLP, there remains a lack of a unified review that systematically connects classical autoencoder variants, variational formulations, and modern transformer-based masked autoencoders within a single conceptual framework. To address this gap, this work consolidates architectural developments, training objectives, and major application domains under a reconstruction-based learning perspective, offering a structured comparison of modeling choices, datasets, and evaluation practices. Our analysis highlights the strengths and limitations of existing approaches, discusses the ongoing influence of autoencoder-style learning in NLP, and outlines future research directions focused on improving training stability, designing more structured latent spaces, and enhancing multilingual representation learning.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 232: Autoencoders in Natural Language Processing: A Comprehensive Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/232">doi: 10.3390/computers15040232</a></p>
	<p>Authors:
		Moussa Redah
		Wasfi G. Al-Khatib
		</p>
	<p>Autoencoder-based models have become a fundamental component of unsupervised and self-supervised learning in natural language processing (NLP), enabling models to learn compact latent representations through input reconstruction. From early denoising autoencoders to probabilistic variational autoencoders (VAEs) and transformer-based masked autoencoding, reconstruction-driven objectives have played a significant role in shaping modern approaches to text representation and generation. This review provides a comprehensive analysis of the evolution of autoencoder architectures and training objectives in NLP, and synthesizes applications of VAEs across language modeling, controllable text generation, machine translation, sentiment modeling, and multilingual representation learning. Although previous surveys have examined deep generative models or representation learning in NLP, there remains a lack of a unified review that systematically connects classical autoencoder variants, variational formulations, and modern transformer-based masked autoencoders within a single conceptual framework. To address this gap, this work consolidates architectural developments, training objectives, and major application domains under a reconstruction-based learning perspective, offering a structured comparison of modeling choices, datasets, and evaluation practices. Our analysis highlights the strengths and limitations of existing approaches, discusses the ongoing influence of autoencoder-style learning in NLP, and outlines future research directions focused on improving training stability, designing more structured latent spaces, and enhancing multilingual representation learning.</p>
	]]></content:encoded>

	<dc:title>Autoencoders in Natural Language Processing: A Comprehensive Review</dc:title>
			<dc:creator>Moussa Redah</dc:creator>
			<dc:creator>Wasfi G. Al-Khatib</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040232</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>232</prism:startingPage>
		<prism:doi>10.3390/computers15040232</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/232</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/231">

	<title>Computers, Vol. 15, Pages 231: Artificial Intelligence for High-Availability Systems: A Comprehensive Review</title>
	<link>https://www.mdpi.com/2073-431X/15/4/231</link>
	<description>High-availability (HA) systems—essential in many contemporary contexts—are designed to guarantee the availability of processes and data for more than 99% of their operational time. These systems are typically implemented as Cloud/Edge infrastructures that are properly maintained by human operators and intelligent agents to sustain the required level of availability. Moreover, we are witnessing the widespread adoption of AI-based automation across many industries. AI-based software agents are increasingly being adopted to introduce more automation into highly available systems, particularly for monitoring and fault detection, fault prediction, recovery, and optimization processes. In this review paper, we discuss the state of the art of AI-based solutions for HA systems. In particular, we focus on the use of AI for the core operational mechanisms of monitoring, failure detection, and recovery. Our discussion begins by reviewing a few key background concepts of HA architectures; we then review recent work on AI-based solutions for monitoring, fault detection, and recovery in HA systems.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 231: Artificial Intelligence for High-Availability Systems: A Comprehensive Review</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/231">doi: 10.3390/computers15040231</a></p>
	<p>Authors:
		Lidia Fotia
		Rosario Gaeta
		Fabrizio Messina
		Domenico Rosaci
		Giuseppe M. L. Sarné
		</p>
	<p>High-availability (HA) systems—essential in many contemporary contexts—are designed to guarantee the availability of processes and data for more than 99% of their operational time. These systems are typically implemented as Cloud/Edge infrastructures that are properly maintained by human operators and intelligent agents to sustain the required level of availability. Moreover, we are witnessing the widespread adoption of AI-based automation across many industries. AI-based software agents are increasingly being adopted to introduce more automation into highly available systems, particularly for monitoring and fault detection, fault prediction, recovery, and optimization processes. In this review paper, we discuss the state of the art of AI-based solutions for HA systems. In particular, we focus on the use of AI for the core operational mechanisms of monitoring, failure detection, and recovery. Our discussion begins by reviewing a few key background concepts of HA architectures; we then review recent work on AI-based solutions for monitoring, fault detection, and recovery in HA systems.</p>
	]]></content:encoded>

	<dc:title>Artificial Intelligence for High-Availability Systems: A Comprehensive Review</dc:title>
			<dc:creator>Lidia Fotia</dc:creator>
			<dc:creator>Rosario Gaeta</dc:creator>
			<dc:creator>Fabrizio Messina</dc:creator>
			<dc:creator>Domenico Rosaci</dc:creator>
			<dc:creator>Giuseppe M. L. Sarné</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040231</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>231</prism:startingPage>
		<prism:doi>10.3390/computers15040231</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/231</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/230">

	<title>Computers, Vol. 15, Pages 230: An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</title>
	<link>https://www.mdpi.com/2073-431X/15/4/230</link>
	<description>Pig behavior statistics can reflect the animals’ health status. Conventional approaches depend on manual observation to derive behavioral information from video recordings, a process that demands substantial time and human effort. To overcome these limitations in indoor intensive farming environments, this study introduces an effective approach for recognizing pig behaviors, employing an enhanced YOLOv8n architecture. The approach utilizes advanced object detection algorithms to automatically identify pig behaviors, including stand, lie, eat, fight, and tail-bite, from overhead video footage of the enclosure. First, images of daily pig behaviors are collected using cameras to build a pig behavior dataset. To boost detection accuracy, the SE attention mechanism is embedded within the feature extraction backbone of the YOLOv8n network to enhance its representational capacity, strengthening the model’s ability to grasp overarching contextual information and improving the expressiveness of extracted features. The GIoU loss function is employed during training to reduce computational cost and accelerate model convergence. Moreover, integrating Ghost convolution into the backbone significantly reduces both computational complexity and the total number of parameters. The experimental findings reveal that the optimized YOLOv8n model contains just 1.71 million parameters, marking a 42.93% reduction relative to the baseline model. Its floating-point operations total 5.0 billion, indicating a 38.27% decrease, while the mean average precision (mAP@50) reaches 96.8%, surpassing the original by 2.6 percentage points. Compared with other widely used YOLO-based object detection frameworks, the proposed approach achieves notably higher accuracy while requiring significantly lower computational resources and model complexity.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 230: An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/230">doi: 10.3390/computers15040230</a></p>
	<p>Authors:
		Jianjun Guo
		Yudian Xu
		Lijun Lin
		Beibei Zhang
		Piao Zhou
		Shangwen Luo
		Yuhan Zhuo
		Jingyu Ji
		Zhijie Luo
		Guangming Cheng
		</p>
	<p>Pig behavior statistics can reflect the animals’ health status. Conventional approaches depend on manual observation to derive behavioral information from video recordings, a process that demands substantial time and human effort. To overcome these limitations in indoor intensive farming environments, this study introduces an effective approach for recognizing pig behaviors, employing an enhanced YOLOv8n architecture. The approach utilizes advanced object detection algorithms to automatically identify pig behaviors, including stand, lie, eat, fight, and tail-bite, from overhead video footage of the enclosure. First, images of daily pig behaviors are collected using cameras to build a pig behavior dataset. To boost detection accuracy, the SE attention mechanism is embedded within the feature extraction backbone of the YOLOv8n network to enhance its representational capacity, strengthening the model’s ability to grasp overarching contextual information and improving the expressiveness of extracted features. The GIoU loss function is employed during training to reduce computational cost and accelerate model convergence. Moreover, integrating Ghost convolution into the backbone significantly reduces both computational complexity and the total number of parameters. The experimental findings reveal that the optimized YOLOv8n model contains just 1.71 million parameters, marking a 42.93% reduction relative to the baseline model. Its floating-point operations total 5.0 billion, indicating a 38.27% decrease, while the mean average precision (mAP@50) reaches 96.8%, surpassing the original by 2.6 percentage points. Compared with other widely used YOLO-based object detection frameworks, the proposed approach achieves notably higher accuracy while requiring significantly lower computational resources and model complexity.</p>
	]]></content:encoded>

	<dc:title>An Enhanced YOLOv8n-Based Approach for Pig Behavior Recognition</dc:title>
			<dc:creator>Jianjun Guo</dc:creator>
			<dc:creator>Yudian Xu</dc:creator>
			<dc:creator>Lijun Lin</dc:creator>
			<dc:creator>Beibei Zhang</dc:creator>
			<dc:creator>Piao Zhou</dc:creator>
			<dc:creator>Shangwen Luo</dc:creator>
			<dc:creator>Yuhan Zhuo</dc:creator>
			<dc:creator>Jingyu Ji</dc:creator>
			<dc:creator>Zhijie Luo</dc:creator>
			<dc:creator>Guangming Cheng</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040230</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>230</prism:startingPage>
		<prism:doi>10.3390/computers15040230</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/230</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/229">

	<title>Computers, Vol. 15, Pages 229: From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</title>
	<link>https://www.mdpi.com/2073-431X/15/4/229</link>
	<description>This scoping review critically synthesizes 34 studies (2015–2026) examining the metaverse’s role in fostering six core STEM competencies, moving beyond descriptive reporting to interrogate whether these technologies constitute genuine pedagogical transformation, which learners are served or excluded, and how isolated interventions connect into lifelong learning pathways. Following PRISMA-ScR guidelines, our analysis reveals that while technology literacy and collaboration appear in 91.2% of our selected studies, mathematical application is addressed in fewer than half (44.1%), raising unanswered questions about whether this pattern reflects an equitable distribution of mathematical learning opportunities across diverse learner populations—a question the current evidence base cannot answer but one that warrants urgent investigation. The evidence demonstrates substantial immediate learning gains through embodied presence and risk-free experimentation, yet a deeper reading suggests this often represents technological optimization of traditional goals rather than epistemological transformation. More troublingly, the concentration of inclusivity evidence on select populations—while rendering students with physical disabilities, Indigenous learners, and refugee students entirely invisible—reveals an equity paradox where immersive technologies may inadvertently amplify existing disparities. The absence of any longitudinal data linking short-term engagement to sustained STEM participation leaves the field’s claim to transformative impact unsubstantiated. This review argues for moving beyond fragmented interventions toward designing coherent, equitable learning pathways that fulfill the metaverse’s potential for all learners.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 229: From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/229">doi: 10.3390/computers15040229</a></p>
	<p>Authors:
		Franklin Parrales-Bravo
		Roberto Tolozano-Benites
		Janio Jadán-Guerrero
		Leonel Vasquez-Cevallos
		Víctor Gómez-Rodríguez
		</p>
	<p>This scoping review critically synthesizes 34 studies (2015–2026) examining the metaverse’s role in fostering six core STEM competencies, moving beyond descriptive reporting to interrogate whether these technologies constitute genuine pedagogical transformation, which learners are served or excluded, and how isolated interventions connect into lifelong learning pathways. Following PRISMA-ScR guidelines, our analysis reveals that while technology literacy and collaboration appear in 91.2% of our selected studies, mathematical application is addressed in fewer than half (44.1%), raising unanswered questions about whether this pattern reflects an equitable distribution of mathematical learning opportunities across diverse learner populations—a question the current evidence base cannot answer but one that warrants urgent investigation. The evidence demonstrates substantial immediate learning gains through embodied presence and risk-free experimentation, yet a deeper reading suggests this often represents technological optimization of traditional goals rather than epistemological transformation. More troublingly, the concentration of inclusivity evidence on select populations—while rendering students with physical disabilities, Indigenous learners, and refugee students entirely invisible—reveals an equity paradox where immersive technologies may inadvertently amplify existing disparities. The absence of any longitudinal data linking short-term engagement to sustained STEM participation leaves the field’s claim to transformative impact unsubstantiated. This review argues for moving beyond fragmented interventions toward designing coherent, equitable learning pathways that fulfill the metaverse’s potential for all learners.</p>
	]]></content:encoded>

	<dc:title>From Virtual Worlds to Real-World Equity: A Scoping Review of the Metaverse as Computer-Assisted Learning for STEM Competencies</dc:title>
			<dc:creator>Franklin Parrales-Bravo</dc:creator>
			<dc:creator>Roberto Tolozano-Benites</dc:creator>
			<dc:creator>Janio Jadán-Guerrero</dc:creator>
			<dc:creator>Leonel Vasquez-Cevallos</dc:creator>
			<dc:creator>Víctor Gómez-Rodríguez</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040229</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>229</prism:startingPage>
		<prism:doi>10.3390/computers15040229</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/229</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/228">

	<title>Computers, Vol. 15, Pages 228: Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</title>
	<link>https://www.mdpi.com/2073-431X/15/4/228</link>
	<description>The security of biometric data is a critical challenge in modern information security due to their uniqueness and non-revocability. Compromise of biometric characteristics leads to irreversible consequences; therefore, storing or transmitting them in plaintext is unacceptable. This paper addresses the confidentiality and integrity of fingerprint data using cryptographic protection methods. Considering the specific nature of biometrics, fingerprint features are used only to generate a cryptographic secret rather than being stored directly. To protect the derived secret, a modified threshold secret-sharing scheme based on non-positional polynomial notation and the Chinese Remainder Theorem is proposed. The method generates a cryptographic secret from fingerprint minutiae described by spatial coordinates and ridge orientation. Concatenating minutiae coordinates and converting them into binary form produces a unique value deterministically linked to a specific user. Compared to the classical Shamir scheme, the modified scheme reduces the computational complexity of secret reconstruction from O(n log² n) to O(k log k), decreases data storage requirements by 30–40% through compact polynomial remainders, and increases the rate of successful secret reconstruction by 12–15% in the presence of noise in biometric samples. The results show that the proposed algorithm can be effectively applied in biometric authentication systems to protect personal data in distributed environments. Security analysis confirms resistance to major attack classes and demonstrates practical applicability in real-world systems.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 228: Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/228">doi: 10.3390/computers15040228</a></p>
	<p>Authors:
		Saule Nyssanbayeva
		Nursulu Kapalova
		Saltanat Beisenova
		</p>
	<p>The security of biometric data is a critical challenge in modern information security due to their uniqueness and non-revocability. Compromise of biometric characteristics leads to irreversible consequences; therefore, storing or transmitting them in plaintext is unacceptable. This paper addresses the confidentiality and integrity of fingerprint data using cryptographic protection methods. Considering the specific nature of biometrics, fingerprint features are used only to generate a cryptographic secret rather than being stored directly. To protect the derived secret, a modified threshold secret-sharing scheme based on non-positional polynomial notation and the Chinese Remainder Theorem is proposed. The method generates a cryptographic secret from fingerprint minutiae described by spatial coordinates and ridge orientation. Concatenating minutiae coordinates and converting them into binary form produces a unique value deterministically linked to a specific user. Compared to the classical Shamir scheme, the modified scheme reduces the computational complexity of secret reconstruction from O(n log² n) to O(k log k), decreases data storage requirements by 30–40% through compact polynomial remainders, and increases the rate of successful secret reconstruction by 12–15% in the presence of noise in biometric samples. The results show that the proposed algorithm can be effectively applied in biometric authentication systems to protect personal data in distributed environments. Security analysis confirms resistance to major attack classes and demonstrates practical applicability in real-world systems.</p>
	]]></content:encoded>

	<dc:title>Modified Shamir Threshold Scheme for Secure Storage of Biometric Data</dc:title>
			<dc:creator>Saule Nyssanbayeva</dc:creator>
			<dc:creator>Nursulu Kapalova</dc:creator>
			<dc:creator>Saltanat Beisenova</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040228</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>228</prism:startingPage>
		<prism:doi>10.3390/computers15040228</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/228</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/227">

	<title>Computers, Vol. 15, Pages 227: PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</title>
	<link>https://www.mdpi.com/2073-431X/15/4/227</link>
	<description>This study aims to automate the generation of execution plans for university projects by transforming BPMN-based process models into hierarchical planning representations that can be executed by HTN planners. Effective implementation of university extension projects requires explicit management of objectives, dependencies, and operational constraints, yet this process is often carried out manually and without formal planning support. To address this problem, the paper proposes PlanProjU, a web-based platform that captures project knowledge through BPMN and translates it into HDDL domain and problem files for execution with SHOP2 and PyHOP. The system was evaluated through real university project cases and a comparative analysis of alternative generated plans. The results show that BPMN-based project knowledge can be operationalized into executable hierarchical planning structures and that different planners may produce distinct plan alternatives depending on project characteristics. The originality of the study lies in the design of a traceable BPMN-to-HDDL workflow for university project planning, implemented in an integrated platform that connects business process modeling with HTN automated planning in a domain that has received limited attention in prior research. In this sense, the proposal serves both as an innovative research contribution and as a practical alternative for structuring implementation decisions in institutional settings.</description>
	<pubDate>2026-04-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 227: PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/227">doi: 10.3390/computers15040227</a></p>
	<p>Authors:
		Jhon Wilder Sanchez-Obando
		Néstor Dario Duque-Méndez
		Luis Fernando Castillo-Ossa
		</p>
	<p>This study aims to automate the generation of execution plans for university projects by transforming BPMN-based process models into hierarchical planning representations that can be executed by HTN planners. Effective implementation of university extension projects requires explicit management of objectives, dependencies, and operational constraints, yet this process is often carried out manually and without formal planning support. To address this problem, the paper proposes PlanProjU, a web-based platform that captures project knowledge through BPMN and translates it into HDDL domain and problem files for execution with SHOP2 and PyHOP. The system was evaluated through real university project cases and a comparative analysis of alternative generated plans. The results show that BPMN-based project knowledge can be operationalized into executable hierarchical planning structures and that different planners may produce distinct plan alternatives depending on project characteristics. The originality of the study lies in the design of a traceable BPMN-to-HDDL workflow for university project planning, implemented in an integrated platform that connects business process modeling with HTN automated planning in a domain that has received limited attention in prior research. In this sense, the proposal serves both as an innovative research contribution and as a practical alternative for structuring implementation decisions in institutional settings.</p>
	]]></content:encoded>

	<dc:title>PlanProjU: A BPMN-to-HDDL HTN Planning Approach for University Project Execution</dc:title>
			<dc:creator>Jhon Wilder Sanchez-Obando</dc:creator>
			<dc:creator>Néstor Dario Duque-Méndez</dc:creator>
			<dc:creator>Luis Fernando Castillo-Ossa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040227</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-07</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-07</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>227</prism:startingPage>
		<prism:doi>10.3390/computers15040227</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/227</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/226">

	<title>Computers, Vol. 15, Pages 226: A Survey on Large Language Models in Software Security: Opportunities and Threats</title>
	<link>https://www.mdpi.com/2073-431X/15/4/226</link>
	<description>The rise of large language models (LLMs) such as GPT-4, Codex, Code Llama, Claude 3, CodeGemma, and DeepSeek is changing the way software development is approached. These models provide strong support for tasks like code writing, bug analysis, and automation. At the same time, their use in software development creates both opportunities and new risks. This survey reviews how LLMs are being used to improve security practices in software development, including vulnerability detection, secure code generation, threat analysis, and patch development. It also discusses how attackers may exploit LLMs for malicious purposes, such as writing malware, carrying out phishing campaigns, or bypassing defenses. We draw on case studies that show LLMs can help uncover zero-day vulnerabilities and speed up secure coding but also highlight cases where they have been misused to generate harmful code, sometimes unintentionally. The paper examines technical challenges like bias in training data, the difficulty of interpreting model outputs, and the risks of adversarial attacks. It also considers ethical and regulatory issues related to accountability, compliance, and responsible use. By bringing together findings from recent research and industry practice, the survey outlines future directions for building safer models, developing stronger defensive frameworks, and shaping policies that balance innovation with security. Overall, the paper argues for a careful approach where LLMs are used to strengthen software security while addressing the risks they introduce through collaboration, oversight, and ongoing improvements.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 226: A Survey on Large Language Models in Software Security: Opportunities and Threats</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/226">doi: 10.3390/computers15040226</a></p>
	<p>Authors:
		Md Bajlur Rashid
		Mohammad Shafayet Jamil Hossain
		Mohammad Ishtiaque Khan
		Sharaban Tahora
		Aiasha Siddika
		Mahmudul Islam Prakash
		Sharmin Yeasmin
		Hossain Shahriar
		</p>
	<p>The rise of large language models (LLMs) such as GPT-4, Codex, Code Llama, Claude 3, CodeGemma, and DeepSeek is changing the way software development is approached. These models provide strong support for tasks like code writing, bug analysis, and automation. At the same time, their use in software development creates both opportunities and new risks. This survey reviews how LLMs are being used to improve security practices in software development, including vulnerability detection, secure code generation, threat analysis, and patch development. It also discusses how attackers may exploit LLMs for malicious purposes, such as writing malware, carrying out phishing campaigns, or bypassing defenses. We draw on case studies that show LLMs can help uncover zero-day vulnerabilities and speed up secure coding but also highlight cases where they have been misused to generate harmful code, sometimes unintentionally. The paper examines technical challenges like bias in training data, the difficulty of interpreting model outputs, and the risks of adversarial attacks. It also considers ethical and regulatory issues related to accountability, compliance, and responsible use. By bringing together findings from recent research and industry practice, the survey outlines future directions for building safer models, developing stronger defensive frameworks, and shaping policies that balance innovation with security. Overall, the paper argues for a careful approach where LLMs are used to strengthen software security while addressing the risks they introduce through collaboration, oversight, and ongoing improvements.</p>
	]]></content:encoded>

	<dc:title>A Survey on Large Language Models in Software Security: Opportunities and Threats</dc:title>
			<dc:creator>Md Bajlur Rashid</dc:creator>
			<dc:creator>Mohammad Shafayet Jamil Hossain</dc:creator>
			<dc:creator>Mohammad Ishtiaque Khan</dc:creator>
			<dc:creator>Sharaban Tahora</dc:creator>
			<dc:creator>Aiasha Siddika</dc:creator>
			<dc:creator>Mahmudul Islam Prakash</dc:creator>
			<dc:creator>Sharmin Yeasmin</dc:creator>
			<dc:creator>Hossain Shahriar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040226</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>226</prism:startingPage>
		<prism:doi>10.3390/computers15040226</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/226</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/225">

	<title>Computers, Vol. 15, Pages 225: Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</title>
	<link>https://www.mdpi.com/2073-431X/15/4/225</link>
	<description>The rapid proliferation of mobile devices, particularly smartphones and tablets, has transformed digital entertainment, with mobile gaming emerging as one of the fastest-growing digital segments. Such applications are inherently latency-sensitive and require effective resource management and seamless mobility support. To overcome these issues, this paper proposes a four-layered infrastructure that combines edge, fog, and cloud computing with Software-Defined Networking (SDN) and is assisted by a lightweight proximity-aware heuristic placement strategy and mobility management. The proposed structure follows a microservices-based decomposition of the gaming functionality and uses clustering algorithms to permit coordinated access to resources by edge and fog nodes. A dynamic lightweight proximity-aware virtual machine placement algorithm is presented to deploy application modules nearer to users depending on resource availability and mobility. The proposed work is simulated using IFogSim2. The proposed model reduces latency by up to 73 percent and improves the task completion rate by 25 percent relative to baseline configurations under dynamic user mobility. These results indicate that the proposed strategy can be effective in improving the performance of latency-sensitive mobile gaming applications in edge-fog networks.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 225: Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/225">doi: 10.3390/computers15040225</a></p>
	<p>Authors:
		Sreebha Bhaskaran
		Supriya Muthuraman
		</p>
	<p>The rapid proliferation of mobile devices, particularly smartphones and tablets, has transformed digital entertainment, with mobile gaming emerging as one of the fastest-growing digital segments. Such applications are inherently latency-sensitive and require effective resource management and seamless mobility support. To overcome these issues, this paper proposes a four-layered infrastructure that combines edge, fog, and cloud computing with Software-Defined Networking (SDN) and is assisted by a lightweight proximity-aware heuristic placement strategy and mobility management. The proposed structure follows a microservices-based decomposition of the gaming functionality and uses clustering algorithms to permit coordinated access to resources by edge and fog nodes. A dynamic lightweight proximity-aware virtual machine placement algorithm is presented to deploy application modules nearer to users depending on resource availability and mobility. The proposed work is simulated using IFogSim2. The proposed model reduces latency by up to 73 percent and improves the task completion rate by 25 percent relative to baseline configurations under dynamic user mobility. These results indicate that the proposed strategy can be effective in improving the performance of latency-sensitive mobile gaming applications in edge-fog networks.</p>
	]]></content:encoded>

	<dc:title>Proximity-Aware VM Placement in Multi-Layer Fog Computing for Efficient Resource Management: Performance Evaluation Under a Gaming Application Scenario</dc:title>
			<dc:creator>Sreebha Bhaskaran</dc:creator>
			<dc:creator>Supriya Muthuraman</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040225</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>225</prism:startingPage>
		<prism:doi>10.3390/computers15040225</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/225</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/224">

	<title>Computers, Vol. 15, Pages 224: Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial–Parallel Systolic Architecture</title>
	<link>https://www.mdpi.com/2073-431X/15/4/224</link>
	<description>The rapid growth of the Internet of Things (IoT) is fundamentally altering industrial and economic landscapes by embedding smart, connected devices into everyday operations. Despite these benefits, significant concerns regarding data protection and user privacy continue to obstruct the widespread use of these technologies, particularly with the looming threat of quantum computing. Implementing post-quantum cryptographic (PQC) solutions is vital for addressing these risks, yet the limited resources found in IoT edge devices present major deployment challenges. Lattice-based cryptography has become a leading solution to these problems, largely because it depends on efficient polynomial multiplication. Enhancing the execution of this mathematical operation is crucial for improving the overall performance of PQC protocols. In this work, we introduce a hybrid serial–parallel systolic architecture specifically engineered for polynomial multiplication within the Binary Ring Learning With Errors (BRLWE) scheme. Designed for the security processors used in IoT hardware, this architecture significantly increases processing speeds while minimizing the use of hardware resources and reducing energy consumption. Such improvements are critical for establishing a secure IoT infrastructure that is resilient against quantum-era attacks and capable of supporting industrial expansion. Moreover, this research aligns with global Sustainable Development Goals (SDGs) 8 and 9 by building trust in innovative systems and fostering a more secure, sustainable, and productive digital economy.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 224: Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial–Parallel Systolic Architecture</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/224">doi: 10.3390/computers15040224</a></p>
	<p>Authors:
		Atef Ibrahim
		Fayez Gebali
		</p>
	<p>The rapid growth of the Internet of Things (IoT) is fundamentally altering industrial and economic landscapes by embedding smart, connected devices into everyday operations. Despite these benefits, significant concerns regarding data protection and user privacy continue to obstruct the widespread use of these technologies, particularly with the looming threat of quantum computing. Implementing post-quantum cryptographic (PQC) solutions is vital for addressing these risks, yet the limited resources found in IoT edge devices present major deployment challenges. Lattice-based cryptography has become a leading solution to these problems, largely because it depends on efficient polynomial multiplication. Enhancing the execution of this mathematical operation is crucial for improving the overall performance of PQC protocols. In this work, we introduce a hybrid serial–parallel systolic architecture specifically engineered for polynomial multiplication within the Binary Ring Learning With Errors (BRLWE) scheme. Designed for the security processors used in IoT hardware, this architecture significantly increases processing speeds while minimizing the use of hardware resources and reducing energy consumption. Such improvements are critical for establishing a secure IoT infrastructure that is resilient against quantum-era attacks and capable of supporting industrial expansion. Moreover, this research aligns with global Sustainable Development Goals (SDGs) 8 and 9 by building trust in innovative systems and fostering a more secure, sustainable, and productive digital economy.</p>
	]]></content:encoded>

	<dc:title>Enhancing Polynomial Multiplication in Post-Quantum Cryptography for IoT Applications: A Hybrid Serial–Parallel Systolic Architecture</dc:title>
			<dc:creator>Atef Ibrahim</dc:creator>
			<dc:creator>Fayez Gebali</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040224</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>224</prism:startingPage>
		<prism:doi>10.3390/computers15040224</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/224</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/223">

	<title>Computers, Vol. 15, Pages 223: AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</title>
	<link>https://www.mdpi.com/2073-431X/15/4/223</link>
	<description>General Matrix Multiplication (GEMM) is a fundamental computational kernel in scientific computing, serving as the foundation for numerous complex tasks. However, in practical applications, the performance of GEMM is often constrained by irregular matrix dimensions and the diversity of hardware architectures. In particular, when processing small and irregular matrices, GEMM typically exhibits reduced computational efficiency. To address these challenges, this paper proposes a GEMM acceleration method based on an adaptive core grouping strategy. The method consists of two key components: a core grouping mechanism that alleviates workload imbalance among multi-core CPUs, and an adaptive block partitioning algorithm that dynamically selects optimal tiling schemes according to the matrix dimensions, achieving both load balance and cache-friendly data access. Experimental results on the Kunpeng CPU platform demonstrate that the proposed method achieves significant performance improvements compared to the Kunpeng KML math library, reaching a peak acceleration of up to 2.1× and an average speedup of 1.64×. These results validate the effectiveness and efficiency of the proposed approach in handling small and irregular matrix computation scenarios.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 223: AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/223">doi: 10.3390/computers15040223</a></p>
	<p>Authors:
		Hongzhe Zhou
		Lu Lu
		Haibiao Yang
		Yu Zhang
		</p>
	<p>General Matrix Multiplication (GEMM) is a fundamental computational kernel in scientific computing, serving as the foundation for numerous complex tasks. However, in practical applications, the performance of GEMM is often constrained by irregular matrix dimensions and the diversity of hardware architectures. In particular, when processing small and irregular matrices, GEMM typically exhibits reduced computational efficiency. To address these challenges, this paper proposes a GEMM acceleration method based on an adaptive core grouping strategy. The method consists of two key components: a core grouping mechanism that alleviates workload imbalance among multi-core CPUs, and an adaptive block partitioning algorithm that dynamically selects optimal tiling schemes according to the matrix dimensions, achieving both load balance and cache-friendly data access. Experimental results on the Kunpeng CPU platform demonstrate that the proposed method achieves significant performance improvements compared to the Kunpeng KML math library, reaching a peak acceleration of up to 2.1× and an average speedup of 1.64×. These results validate the effectiveness and efficiency of the proposed approach in handling small and irregular matrix computation scenarios.</p>
	]]></content:encoded>

	<dc:title>AGP-GEMM: Adaptive Grouping and Partitioning Framework for Accelerating Small and Irregular Matrices on CPUs</dc:title>
			<dc:creator>Hongzhe Zhou</dc:creator>
			<dc:creator>Lu Lu</dc:creator>
			<dc:creator>Haibiao Yang</dc:creator>
			<dc:creator>Yu Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040223</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>223</prism:startingPage>
		<prism:doi>10.3390/computers15040223</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/223</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/222">

	<title>Computers, Vol. 15, Pages 222: An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</title>
	<link>https://www.mdpi.com/2073-431X/15/4/222</link>
	<description>Speech-to-text translation (S2TT) for low-resource languages remains challenging due to the scarcity of parallel speech translation data and the susceptibility of modular pipelines to error propagation. This paper presents a controlled empirical comparison of cascade and end-to-end approaches for Kazakh–Russian speech translation using the ST-kk-ru dataset (≈332 h, 140 k triplets). The cascade framework is strengthened with recent pre-trained models for automatic speech recognition and neural machine translation, achieving 21.3 BLEU on the test set. Three representative end-to-end architectures are evaluated under identical data conditions. The strongest direct model, combining a Wav2Vec 2.0 encoder with an mBART decoder augmented by a length adaptor and adapter modules, reaches 17.97 BLEU, compared with 15.35 BLEU for FAIRSEQ S2T and 16.3 BLEU for ESPnet-ST. Automatic evaluation is complemented by expert manual assessment and targeted linguistic analysis. Results indicate that, under current low-resource conditions, cascade systems provide higher translation accuracy and better morpho-syntactic fidelity, while end-to-end models remain competitive and offer advantages in architectural simplicity and potentially reduced inference latency (due to single-pass processing), although empirical measurements were not conducted in this study. This study establishes a reproducible benchmark for Kazakh–Russian speech translation and highlights practical trade-offs between modeling paradigms in low-resource, morphologically rich settings.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 222: An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/222">doi: 10.3390/computers15040222</a></p>
	<p>Authors:
		Zhanibek Kozhirbayev
		</p>
	<p>Speech-to-text translation (S2TT) for low-resource languages remains challenging due to the scarcity of parallel speech translation data and the susceptibility of modular pipelines to error propagation. This paper presents a controlled empirical comparison of cascade and end-to-end approaches for Kazakh–Russian speech translation using the ST-kk-ru dataset (≈332 h, 140 k triplets). The cascade framework is strengthened with recent pre-trained models for automatic speech recognition and neural machine translation, achieving 21.3 BLEU on the test set. Three representative end-to-end architectures are evaluated under identical data conditions. The strongest direct model, combining a Wav2Vec 2.0 encoder with an mBART decoder augmented by a length adaptor and adapter modules, reaches 17.97 BLEU, compared with 15.35 BLEU for FAIRSEQ S2T and 16.3 BLEU for ESPnet-ST. Automatic evaluation is complemented by expert manual assessment and targeted linguistic analysis. Results indicate that, under current low-resource conditions, cascade systems provide higher translation accuracy and better morpho-syntactic fidelity, while end-to-end models remain competitive and offer advantages in architectural simplicity and potentially reduced inference latency (due to single-pass processing), although empirical measurements were not conducted in this study. This study establishes a reproducible benchmark for Kazakh–Russian speech translation and highlights practical trade-offs between modeling paradigms in low-resource, morphologically rich settings.</p>
	]]></content:encoded>

	<dc:title>An Empirical Comparison of Cascade and Direct End-to-End Speech Translation for Low-Resource Language Pair</dc:title>
			<dc:creator>Zhanibek Kozhirbayev</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040222</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>222</prism:startingPage>
		<prism:doi>10.3390/computers15040222</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/222</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/221">

	<title>Computers, Vol. 15, Pages 221: Table-Aware Row-Level RAG for Classical Chinese Understanding</title>
	<link>https://www.mdpi.com/2073-431X/15/4/221</link>
	<description>The classical Chinese language is characterized by a high density of meaning, wide use of polysemy, and strong dependence on history and culture, which pose challenges to existing large language models (LLMs). Retrieval-augmented generation (RAG) technology has become a prevailing option that can address these issues without retraining the model, but most existing RAG systems regard structured tables as unstructured text, encoding a whole table into one vector. Such a scheme usually hides row-level semantic information and raises the reasoning cost for LLMs. In this study, we propose a new table-aware row-wise retrieval system in which each row of a table is treated as an individual semantic unit, enabling explicit (rather than implicit) reasoning at generation time. We organize the table into row-level vector representations, which makes retrieval more deterministic and semantically interpretable, particularly for pedagogical or philological datasets. Built on LangChain and integrated with Qwen LLMs, our system is evaluated experimentally on classical Chinese learning tasks, where we find that, compared with traditional RAG systems, it improves retrieval performance, semantic consistency, and explainability, with no model training or extra computation time required.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 221: Table-Aware Row-Level RAG for Classical Chinese Understanding</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/221">doi: 10.3390/computers15040221</a></p>
	<p>Authors:
		Zhihao Liu
		Waiyie Leong
		</p>
	<p>The classical Chinese language is characterized by a high density of meaning, wide use of polysemy, and strong dependence on history and culture, which pose challenges to existing large language models (LLMs). Retrieval-augmented generation (RAG) technology has become a prevailing option that can address these issues without retraining the model, but most existing RAG systems regard structured tables as unstructured text, encoding a whole table into one vector. Such a scheme usually hides row-level semantic information and raises the reasoning cost for LLMs. In this study, we propose a new table-aware row-wise retrieval system in which each row of a table is treated as an individual semantic unit, enabling explicit (rather than implicit) reasoning at generation time. We organize the table into row-level vector representations, which makes retrieval more deterministic and semantically interpretable, particularly for pedagogical or philological datasets. Built on LangChain and integrated with Qwen LLMs, our system is evaluated experimentally on classical Chinese learning tasks, where we find that, compared with traditional RAG systems, it improves retrieval performance, semantic consistency, and explainability, with no model training or extra computation time required.</p>
	]]></content:encoded>

	<dc:title>Table-Aware Row-Level RAG for Classical Chinese Understanding</dc:title>
			<dc:creator>Zhihao Liu</dc:creator>
			<dc:creator>Waiyie Leong</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040221</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>221</prism:startingPage>
		<prism:doi>10.3390/computers15040221</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/221</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/220">

	<title>Computers, Vol. 15, Pages 220: Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</title>
	<link>https://www.mdpi.com/2073-431X/15/4/220</link>
	<description>The challenge of software vulnerabilities persists globally, despite the widespread availability of advanced security tools and comprehensive developer guidelines. This issue is not the result of professional negligence, but rather of the complex and non-intuitive nature of secure coding. This research takes on the massive data silos in the security industry by providing a comprehensive review of best practices drawn from 35 reputable academic and corporate sources. Authentication, cryptography, input validation, and deployment hardening are some of the key development domains into which these practices are organized. We conduct a comprehensive analysis of each practice, elucidating the specific security issue it addresses, prevalent implementation patterns, and potential hazards, in addition to providing a checklist. The practices range from simple precautions, such as avoiding hardcoded passwords, to more involved methods, such as correctly encoding output and configuring access controls effectively. We assert that despite the prevalent usage of tools such as static analyzers, numerous vulnerabilities persist due to developers’ insufficient training in integrating security considerations into their coding practices. This work aspires to serve as a comprehensive, organized resource that supplies developers with the necessary context and guidance to make informed, security-oriented decisions throughout the software development lifecycle. The aim is to develop a more extensive resource than those presently accessible, one that can also assist educators or security teams during code instruction or evaluation.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 220: Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/220">doi: 10.3390/computers15040220</a></p>
	<p>Authors:
		Alyah Alromaizan
		Ghala Alzahrani
		Aliza Khan
		Lulwah Alhumaid
		Md Kamrul Siam
		Muhammad Umair Khan
		Md Jobair Hossain Faruk
		Hossain Shahriar
		</p>
	<p>The challenge of software vulnerabilities persists globally, despite the widespread availability of advanced security tools and comprehensive developer guidelines. This issue is not the result of professional negligence, but rather of the complex and non-intuitive nature of secure coding. This research takes on the massive data silos in the security industry by providing a comprehensive review of best practices drawn from 35 reputable academic and corporate sources. Authentication, cryptography, input validation, and deployment hardening are some of the key development domains into which these practices are organized. We conduct a comprehensive analysis of each practice, elucidating the specific security issue it addresses, prevalent implementation patterns, and potential hazards, in addition to providing a checklist. The practices range from simple precautions, such as avoiding hardcoded passwords, to more involved methods, such as correctly encoding output and configuring access controls effectively. We assert that despite the prevalent usage of tools such as static analyzers, numerous vulnerabilities persist due to developers’ insufficient training in integrating security considerations into their coding practices. This work aspires to serve as a comprehensive, organized resource that supplies developers with the necessary context and guidance to make informed, security-oriented decisions throughout the software development lifecycle. The aim is to develop a more extensive resource than those presently accessible, one that can also assist educators or security teams during code instruction or evaluation.</p>
	]]></content:encoded>

	<dc:title>Toward a Unified Framework for Secure Coding: A Comprehensive Synthesis of Best Practices</dc:title>
			<dc:creator>Alyah Alromaizan</dc:creator>
			<dc:creator>Ghala Alzahrani</dc:creator>
			<dc:creator>Aliza Khan</dc:creator>
			<dc:creator>Lulwah Alhumaid</dc:creator>
			<dc:creator>Md Kamrul Siam</dc:creator>
			<dc:creator>Muhammad Umair Khan</dc:creator>
			<dc:creator>Md Jobair Hossain Faruk</dc:creator>
			<dc:creator>Hossain Shahriar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040220</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>220</prism:startingPage>
		<prism:doi>10.3390/computers15040220</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/220</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/219">

	<title>Computers, Vol. 15, Pages 219: Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</title>
	<link>https://www.mdpi.com/2073-431X/15/4/219</link>
	<description>This paper presents a dual-core RISC-V architecture designed for energy-efficient AI acceleration at the edge, featuring dynamic MAC unit sharing, frequency scaling (DFS), and FIFO-based resource arbitration. The system comprises two RISC-V cores that compete for shared computational resources—a single Multiply–Accumulate (MAC) unit and a shared external memory subsystem—governed by a channel-based arbitration mechanism with CPU-priority semantics, while each core maintains private instruction and data caches. The architecture implements a tightly coupled Neural Processing Unit (NPU) with CONV, GEMM, and POOL operations that execute opportunistically in the background when the MAC unit is available. Dynamic frequency scaling (DFS) with three levels (100/200/400 MHz) is applied to the shared MAC unit, allowing the dynamic acceleration of CNN workloads. The arbitration mechanism uses SystemC sc_fifo channels with CPU-priority polling, ensuring that CPU execution is minimally impacted by background AI processing while the NPU makes progress during idle MAC slots. The NPU supports 3 × 3 convolutions, matrix multiplication (GEMM) with 10 × 10 tiles, and pooling operations. The implementation is cycle-accurate in SystemC, targeting FPGA deployment. Experimental evaluation demonstrates that the dual-core architecture achieves 1.87× speedup with 93.5% efficiency for parallel workloads, while DFS enables 70% power reduction at low frequency. The system successfully executes simultaneous CPU and AI workloads, with CPU-priority arbitration ensuring no CPU starvation under contention. The proposed design offers a practical solution for embedded AI applications requiring both general-purpose computation and neural network acceleration, validated through comprehensive SystemC simulation on modern FPGA platforms.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 219: Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/219">doi: 10.3390/computers15040219</a></p>
	<p>Authors:
		Cristian Andy Tanase
		</p>
	<p>This paper presents a dual-core RISC-V architecture designed for energy-efficient AI acceleration at the edge, featuring dynamic MAC unit sharing, frequency scaling (DFS), and FIFO-based resource arbitration. The system comprises two RISC-V cores that compete for shared computational resources—a single Multiply–Accumulate (MAC) unit and a shared external memory subsystem—governed by a channel-based arbitration mechanism with CPU-priority semantics, while each core maintains private instruction and data caches. The architecture implements a tightly coupled Neural Processing Unit (NPU) with CONV, GEMM, and POOL operations that execute opportunistically in the background when the MAC unit is available. Dynamic frequency scaling (DFS) with three levels (100/200/400 MHz) is applied to the shared MAC unit, allowing the dynamic acceleration of CNN workloads. The arbitration mechanism uses SystemC sc_fifo channels with CPU-priority polling, ensuring that CPU execution is minimally impacted by background AI processing while the NPU makes progress during idle MAC slots. The NPU supports 3 × 3 convolutions, matrix multiplication (GEMM) with 10 × 10 tiles, and pooling operations. The implementation is cycle-accurate in SystemC, targeting FPGA deployment. Experimental evaluation demonstrates that the dual-core architecture achieves 1.87× speedup with 93.5% efficiency for parallel workloads, while DFS enables 70% power reduction at low frequency. The system successfully executes simultaneous CPU and AI workloads, with CPU-priority arbitration ensuring no CPU starvation under contention. The proposed design offers a practical solution for embedded AI applications requiring both general-purpose computation and neural network acceleration, validated through comprehensive SystemC simulation on modern FPGA platforms.</p>
	]]></content:encoded>

	<dc:title>Energy-Efficient Dual-Core RISC-V Architecture for Edge AI Acceleration with Dynamic MAC Unit Reuse</dc:title>
			<dc:creator>Cristian Andy Tanase</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040219</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>219</prism:startingPage>
		<prism:doi>10.3390/computers15040219</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/219</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/217">

	<title>Computers, Vol. 15, Pages 217: Stylometry Analysis of Human and Machine Text for Academic Integrity</title>
	<link>https://www.mdpi.com/2073-431X/15/4/217</link>
	<description>This work addresses critical challenges to academic integrity, including plagiarism, fabrication, and verification of authorship of educational content, by proposing a Natural Language Processing (NLP)-based framework for authenticating students’ content through author attribution and style change detection. Despite some initial efforts, several aspects of the topic are yet to be explored. In contrast to existing solutions, the paper provides a comprehensive analysis of the topic by targeting four relevant tasks: (i) classification of human and machine text, (ii) differentiation between single- and multi-authored documents, (iii) author change detection within multi-authored documents, and (iv) author recognition in collaboratively produced documents. The solutions proposed for the tasks are evaluated on two datasets generated with Gemini using two different prompts, one with a normal and one with a strict set of instructions. During experiments, some performance reduction is observed for the proposed solutions on the dataset generated by the strict prompt, demonstrating the complexities involved in detecting machine-generated text with cleverly crafted prompts. The generated datasets, code, and other relevant materials are made publicly available on GitHub and are expected to provide a baseline for future research in the domain.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 217: Stylometry Analysis of Human and Machine Text for Academic Integrity</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/217">doi: 10.3390/computers15040217</a></p>
	<p>Authors:
		Hezam Albaqami
		Muhammad Asif Ayub
		Nasir Ahmad
		Yaseen Ahmad
		Mohammad M. Alqahtani
		Abdullah M. Algamdi
		Almoaid A. Owaidah
		Kashif Ahmad
		</p>
	<p>This work addresses critical challenges to academic integrity, including plagiarism, fabrication, and verification of authorship of educational content, by proposing a Natural Language Processing (NLP)-based framework for authenticating students’ content through author attribution and style change detection. Despite some initial efforts, several aspects of the topic are yet to be explored. In contrast to existing solutions, the paper provides a comprehensive analysis of the topic by targeting four relevant tasks: (i) classification of human and machine text, (ii) differentiation between single- and multi-authored documents, (iii) author change detection within multi-authored documents, and (iv) author recognition in collaboratively produced documents. The solutions proposed for the tasks are evaluated on two datasets generated with Gemini using two different prompts, one with a normal and one with a strict set of instructions. During experiments, some performance reduction is observed for the proposed solutions on the dataset generated by the strict prompt, demonstrating the complexities involved in detecting machine-generated text with cleverly crafted prompts. The generated datasets, code, and other relevant materials are made publicly available on GitHub and are expected to provide a baseline for future research in the domain.</p>
	]]></content:encoded>

	<dc:title>Stylometry Analysis of Human and Machine Text for Academic Integrity</dc:title>
			<dc:creator>Hezam Albaqami</dc:creator>
			<dc:creator>Muhammad Asif Ayub</dc:creator>
			<dc:creator>Nasir Ahmad</dc:creator>
			<dc:creator>Yaseen Ahmad</dc:creator>
			<dc:creator>Mohammad M. Alqahtani</dc:creator>
			<dc:creator>Abdullah M. Algamdi</dc:creator>
			<dc:creator>Almoaid A. Owaidah</dc:creator>
			<dc:creator>Kashif Ahmad</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040217</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>217</prism:startingPage>
		<prism:doi>10.3390/computers15040217</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/217</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/218">

	<title>Computers, Vol. 15, Pages 218: Towards a Reference Architecture for Machine Learning Operations</title>
	<link>https://www.mdpi.com/2073-431X/15/4/218</link>
	<description>Industrial organisations increasingly rely on machine learning (ML) to improve quality, maintenance, and planning in Industry 4.0/5.0 ecosystems. However, turning experimental models into reliable services on the production floor remains complex due to the heterogeneity of operational technologies (OTs) and information technologies (ITs), including implementation constraints, latency in edge-fog-cloud scenarios, governance requirements, and continuous performance degradation caused by data drift. Although Machine Learning Operations (MLOps) provides lifecycle practices for deployment, monitoring, and retraining, the evidence is fragmented across tool-centric descriptions, case-specific pipelines, and conceptual architectures, offering limited guidance on which industrial constraints should inform architectural decisions and how to evaluate solutions. This work addresses that gap through a PRISMA-guided systematic review of 49 studies on industrial MLOps (with the search and screening primarily targeting Industry 4.0/IIoT operationalisation contexts, as reflected in the search strategy and corpus) and an evidence-based synthesis of principles, challenges, lifecycle practices, and enabling technologies. From this synthesis, industrial requirements are derived that encompass OT/IT integration, edge-fog-cloud orchestration, security and traceability, and observability-based lifecycle control. On this basis, a reference architecture is proposed that maps these requirements to functional layers, data and control flows, and verifiable responsibilities. To support reproducibility and practical inspectability, the article also presents an open-source architectural instantiation aligned with the proposed decomposition. Finally, the evaluation is illustrated through a predictive maintenance use case (tool breakage) in a single CNC machining cell, where the objective is to demonstrate end-to-end feasibility under realistic operational constraints rather than cross-scenario superiority or broad industrial generalisability.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 218: Towards a Reference Architecture for Machine Learning Operations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/218">doi: 10.3390/computers15040218</a></p>
	<p>Authors:
		Miguel Ángel Mateo-Casalí
		Andrés Boza
		Francisco Fraile
		</p>
	<p>Industrial organisations increasingly rely on machine learning (ML) to improve quality, maintenance, and planning in Industry 4.0/5.0 ecosystems. However, turning experimental models into reliable services on the production floor remains complex due to the heterogeneity of operational technologies (OTs) and information technologies (ITs), including implementation constraints, latency in edge-fog-cloud scenarios, governance requirements, and continuous performance degradation caused by data drift. Although Machine Learning Operations (MLOps) provides lifecycle practices for deployment, monitoring, and retraining, the evidence is fragmented across tool-centric descriptions, case-specific pipelines, and conceptual architectures, offering limited guidance on which industrial constraints should inform architectural decisions and how to evaluate solutions. This work addresses that gap through a PRISMA-guided systematic review of 49 studies on industrial MLOps (with the search and screening primarily targeting Industry 4.0/IIoT operationalisation contexts, as reflected in the search strategy and corpus) and an evidence-based synthesis of principles, challenges, lifecycle practices, and enabling technologies. From this synthesis, industrial requirements are derived that encompass OT/IT integration, edge-fog-cloud orchestration, security and traceability, and observability-based lifecycle control. On this basis, a reference architecture is proposed that maps these requirements to functional layers, data and control flows, and verifiable responsibilities. To support reproducibility and practical inspectability, the article also presents an open-source architectural instantiation aligned with the proposed decomposition. Finally, the evaluation is illustrated through a predictive maintenance use case (tool breakage) in a single CNC machining cell, where the objective is to demonstrate end-to-end feasibility under realistic operational constraints rather than cross-scenario superiority or broad industrial generalisability.</p>
	]]></content:encoded>

	<dc:title>Towards a Reference Architecture for Machine Learning Operations</dc:title>
			<dc:creator>Miguel Ángel Mateo-Casalí</dc:creator>
			<dc:creator>Andrés Boza</dc:creator>
			<dc:creator>Francisco Fraile</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040218</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>218</prism:startingPage>
		<prism:doi>10.3390/computers15040218</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/218</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/216">

	<title>Computers, Vol. 15, Pages 216: Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</title>
	<link>https://www.mdpi.com/2073-431X/15/4/216</link>
	<description>This paper proposes adaptive sequence-based heuristics for solving rectangular two-dimensional guillotine Cutting and Packing Problems (CPPs). These problems are essential in various industrial sectors, aiming to maximise resource utilisation by selecting profitable item subsets or minimise waste by using the fewest possible identical large objects. The core methodology is grounded in the principle that if a specific item sequence generates a high-quality solution, incremental adjustments to that sequence can yield even better outcomes. By iteratively refining item ordering through the BubbleSearch method, the heuristics balance search intensification with the diversification of the solution space. Extensive computational experiments were conducted on benchmark datasets, including SET1, ATP, and CLASS, across multiple problem variants such as the Single Stock-Size Cutting Stock Problem (SSSCSP) and the Single Large Object Placement Problem (SLOPP). The results confirm that these heuristics and their extension with path relinking consistently deliver optimal or near-optimal solutions. These heuristics achieve high performance in computational times that are significantly shorter than existing state-of-the-art methods, demonstrating their robustness, flexibility, and suitability for software transferability and real-world industrial adoption.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 216: Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/216">doi: 10.3390/computers15040216</a></p>
	<p>Authors:
		Óscar Oliveira
		Dorabela Gamboa
		</p>
	<p>This paper proposes adaptive sequence-based heuristics for solving rectangular two-dimensional guillotine Cutting and Packing Problems (CPPs). These problems are essential in various industrial sectors, aiming to maximise resource utilisation by selecting profitable item subsets or minimise waste by using the fewest possible identical large objects. The core methodology is grounded in the principle that if a specific item sequence generates a high-quality solution, incremental adjustments to that sequence can yield even better outcomes. By iteratively refining item ordering through the BubbleSearch method, the heuristics balance search intensification with the diversification of the solution space. Extensive computational experiments were conducted on benchmark datasets, including SET1, ATP, and CLASS, across multiple problem variants such as the Single Stock-Size Cutting Stock Problem (SSSCSP) and the Single Large Object Placement Problem (SLOPP). The results confirm that these heuristics and their extension with path relinking consistently deliver optimal or near-optimal solutions. These heuristics achieve high performance in computational times that are significantly shorter than existing state-of-the-art methods, demonstrating their robustness, flexibility, and suitability for software transferability and real-world industrial adoption.</p>
	]]></content:encoded>

	<dc:title>Adaptive Sequence-Based Heuristic for Two-Dimensional Guillotine Cutting and Packing Problems</dc:title>
			<dc:creator>Óscar Oliveira</dc:creator>
			<dc:creator>Dorabela Gamboa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040216</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>216</prism:startingPage>
		<prism:doi>10.3390/computers15040216</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/216</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/215">

	<title>Computers, Vol. 15, Pages 215: Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</title>
	<link>https://www.mdpi.com/2073-431X/15/4/215</link>
	<description>This study analyzes the determinants of generative artificial intelligence (GAI) use intensity among the Spanish working population, as well as the possible existence of gender gaps in its adoption. To this end, a conceptual model is proposed that incorporates perceived economic and productive usefulness (PEU), perceived social usefulness (PSU), three dimensions of the Technology Readiness Index—technological optimism (OPTI), innovativeness (INNOV), and insecurity (INSEC)—and three sociodemographic variables: entrepreneurial status, gender, and generational cohort. The model is implemented using artificial neural networks (ANNs) endowed with explanatory capability through Shapley Additive Explanations (SHAP). The application of SHAP enables the assessment of both the global and local importance of the explanatory variables, as well as the potential existence of gender biases in their contribution to GAI use. The results indicate that the most relevant variables are PEU, generational cohort, and INNOV. Although gender does not rank among the most important variables in terms of global importance, women exhibit lower levels of GAI use, and gender-related differences are also observed in the contribution of several explanatory variables. In particular, substantive effect sizes are observed for PSU, OPTI, INSEC, entrepreneurial status, and membership in Generation Y. By contrast, differences associated with especially relevant variables such as PEU and INNOV, as well as membership in Generation Z, do not exhibit meaningful effect sizes.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 215: Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/215">doi: 10.3390/computers15040215</a></p>
	<p>Authors:
		Teresa Torres-Coronas
		Jorge de Andrés-Sánchez
		Orlando Lima Rua
		Álvaro Carrasco-Aguilar
		</p>
	<p>This study analyzes the determinants of generative artificial intelligence (GAI) use intensity among the Spanish working population, as well as the possible existence of gender gaps in its adoption. To this end, a conceptual model is proposed that incorporates perceived economic and productive usefulness (PEU), perceived social usefulness (PSU), three dimensions of the Technology Readiness Index—technological optimism (OPTI), innovativeness (INNOV), and insecurity (INSEC)—and three sociodemographic variables: entrepreneurial status, gender, and generational cohort. The model is implemented using artificial neural networks (ANNs) endowed with explanatory capability through Shapley Additive Explanations (SHAP). The application of SHAP enables the assessment of both the global and local importance of the explanatory variables, as well as the potential existence of gender biases in their contribution to GAI use. The results indicate that the most relevant variables are PEU, generational cohort, and INNOV. Although gender does not rank among the most important variables in terms of global importance, women exhibit lower levels of GAI use, and gender-related differences are also observed in the contribution of several explanatory variables. In particular, substantive effect sizes are observed for PSU, OPTI, INSEC, entrepreneurial status, and membership in Generation Y. By contrast, differences associated with especially relevant variables such as PEU and INNOV, as well as membership in Generation Z, do not exhibit meaningful effect sizes.</p>
	]]></content:encoded>

	<dc:title>Drivers and Barriers to the Use of Generative Artificial Intelligence in the Spanish Active Population: Insights from Artificial Neural Network Modeling and Shapley Additive Explanations</dc:title>
			<dc:creator>Teresa Torres-Coronas</dc:creator>
			<dc:creator>Jorge de Andrés-Sánchez</dc:creator>
			<dc:creator>Orlando Lima Rua</dc:creator>
			<dc:creator>Álvaro Carrasco-Aguilar</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040215</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>215</prism:startingPage>
		<prism:doi>10.3390/computers15040215</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/215</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/214">

	<title>Computers, Vol. 15, Pages 214: A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</title>
	<link>https://www.mdpi.com/2073-431X/15/4/214</link>
	<description>Mobile edge computing (MEC) enables resource-constrained mobile devices to execute delay-sensitive and compute-intensive applications by offloading tasks to nearby edge servers. However, task orchestration in MEC is challenged by highly dynamic system conditions, unreliable networks, and distributed edge environments. Moreover, as the number of mobile users, tasks, and distributed computing resources (edge/cloud servers) increases, the task orchestration process becomes more complex due to the expanded decision space and the need to efficiently allocate heterogeneous resources under latency and capacity constraints. As the decision space grows, exhaustive-search-based orchestration becomes computationally infeasible. Clustering approaches often rely on proximity-only grouping, while learning-based solutions require extensive training and parameter tuning. To address these challenges, this paper proposes a Multi-Criteria Hierarchical Clustering-based Task Orchestrator (MCHC-TO), a novel framework that integrates multi-criteria decision-making with divisive hierarchical clustering for preference-aware and adaptive workload orchestration. Edge servers are first evaluated using multiple decision criteria, and the resulting preference rankings are exploited to form hierarchical preference-based clusters. Incoming tasks are then assigned to the most suitable cluster based on task requirements, enabling efficient resource utilization and dynamic decision-making. Extensive simulations conducted using an edge computing simulator demonstrate that the proposed MCHC-TO framework consistently outperforms benchmark approaches, achieving reductions in average service delay and task failure rate of up to 48% and 92%, respectively. These results highlight the effectiveness of combining multi-criteria evaluation with hierarchical clustering for robust and dynamic task orchestration in MEC environments.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 214: A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/214">doi: 10.3390/computers15040214</a></p>
	<p>Authors:
		Mona Alghamdi
		Atm S. Alam
		Asma Cherif
		</p>
	<p>Mobile edge computing (MEC) enables resource-constrained mobile devices to execute delay-sensitive and compute-intensive applications by offloading tasks to nearby edge servers. However, task orchestration in MEC is challenged by highly dynamic system conditions, unreliable networks, and distributed edge environments. Moreover, as the number of mobile users, tasks, and distributed computing resources (edge/cloud servers) increases, the task orchestration process becomes more complex due to the expanded decision space and the need to efficiently allocate heterogeneous resources under latency and capacity constraints. As the decision space grows, exhaustive-search-based orchestration becomes computationally infeasible. Clustering approaches often rely on proximity-only grouping, while learning-based solutions require extensive training and parameter tuning. To address these challenges, this paper proposes a Multi-Criteria Hierarchical Clustering-based Task Orchestrator (MCHC-TO), a novel framework that integrates multi-criteria decision-making with divisive hierarchical clustering for preference-aware and adaptive workload orchestration. Edge servers are first evaluated using multiple decision criteria, and the resulting preference rankings are exploited to form hierarchical preference-based clusters. Incoming tasks are then assigned to the most suitable cluster based on task requirements, enabling efficient resource utilization and dynamic decision-making. Extensive simulations conducted using an edge computing simulator demonstrate that the proposed MCHC-TO framework consistently outperforms benchmark approaches, achieving reductions in average service delay and task failure rate of up to 48% and 92%, respectively. These results highlight the effectiveness of combining multi-criteria evaluation with hierarchical clustering for robust and dynamic task orchestration in MEC environments.</p>
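	<p>To make the clustering idea concrete, here is a toy sketch in which edge servers are scored by weighted criteria and recursively split into a small preference hierarchy; a new task would then be routed to the cluster whose score range fits its requirements. The criteria, weights, and 2-means splitting rule are illustrative assumptions, not the exact MCHC-TO procedure.</p>
	<pre><code># Toy sketch (assumed criteria and weights): multi-criteria scoring
# followed by a divisive, KMeans-based split into preference clusters.
import numpy as np
from sklearn.cluster import KMeans

# Rows: edge servers; columns: normalized criteria (e.g., inverse latency,
# free CPU, bandwidth), where higher is better.
criteria = np.array([[0.9, 0.2, 0.7],
                     [0.4, 0.8, 0.6],
                     [0.1, 0.9, 0.3],
                     [0.8, 0.5, 0.9]])
weights = np.array([0.5, 0.3, 0.2])   # decision-maker preferences
scores = criteria @ weights           # weighted-sum preference ranking

def divisive_split(indices, depth=0, max_depth=2):
    """Recursively split servers into a preference hierarchy (top-down)."""
    if depth == max_depth or len(indices) < 2:
        return list(indices)
    labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(
        scores[indices].reshape(-1, 1))
    return [divisive_split(indices[labels == k], depth + 1)
            for k in (0, 1)]

print(divisive_split(np.arange(len(scores))))  # nested preference clusters
</code></pre>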
	]]></content:encoded>

	<dc:title>A Dynamic Clustering Framework for Intelligent Task Orchestration in Mobile Edge Computing</dc:title>
			<dc:creator>Mona Alghamdi</dc:creator>
			<dc:creator>Atm S. Alam</dc:creator>
			<dc:creator>Asma Cherif</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040214</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>214</prism:startingPage>
		<prism:doi>10.3390/computers15040214</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/214</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/213">

	<title>Computers, Vol. 15, Pages 213: A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</title>
	<link>https://www.mdpi.com/2073-431X/15/4/213</link>
	<description>The rapid expansion of scientific publications indexed in multiple bibliographic databases has created new computational challenges for large-scale scientometric analysis. Differences in metadata schemas, identifier structures, and export formats across indexing systems such as Web of Science and Scopus introduce inconsistencies that may distort network-based bibliometric analyses. These issues affect duplicate detection, node identification, and network topology construction. This study proposes a reproducible computational pipeline for cross-database scientometric network construction. The framework formalizes the preprocessing workflow into explicit computational modules, including metadata harmonization, deterministic duplicate detection, sparse graph construction, normalization, and structural diagnostics. The proposed architecture separates preprocessing stages into reproducible algorithmic components, enabling transparent evaluation of methodological assumptions. Empirical evaluation using an interdisciplinary dataset of 317 publications (1990&amp;ndash;2023) demonstrates that deterministic preprocessing significantly improves network stability and preserves clustering structure. Structural diagnostics based on modularity, the Herfindahl&amp;ndash;Hirschman Index, Shannon entropy, and the Gini coefficient provide a multi-dimensional evaluation of network topology. Scalability experiments confirm near-linear computational growth under sparse graph construction. The principal contribution of this work lies in the formalization of a transparent and extensible computational architecture for reproducible scientometric analysis. The proposed pipeline supports reliable cross-database integration and enables scalable knowledge-mapping applications in interdisciplinary research domains.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 213: A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/213">doi: 10.3390/computers15040213</a></p>
	<p>Authors:
		Denny Moreno-Castro
		Omar Orlando Franco-Arias
		Cícero Pimenteira
		Nicolás Márquez
		Cristian Vidal-Silva
		</p>
	<p>The rapid expansion of scientific publications indexed in multiple bibliographic databases has created new computational challenges for large-scale scientometric analysis. Differences in metadata schemas, identifier structures, and export formats across indexing systems such as Web of Science and Scopus introduce inconsistencies that may distort network-based bibliometric analyses. These issues affect duplicate detection, node identification, and network topology construction. This study proposes a reproducible computational pipeline for cross-database scientometric network construction. The framework formalizes the preprocessing workflow into explicit computational modules, including metadata harmonization, deterministic duplicate detection, sparse graph construction, normalization, and structural diagnostics. The proposed architecture separates preprocessing stages into reproducible algorithmic components, enabling transparent evaluation of methodological assumptions. Empirical evaluation using an interdisciplinary dataset of 317 publications (1990&ndash;2023) demonstrates that deterministic preprocessing significantly improves network stability and preserves clustering structure. Structural diagnostics based on modularity, the Herfindahl&ndash;Hirschman Index, Shannon entropy, and the Gini coefficient provide a multi-dimensional evaluation of network topology. Scalability experiments confirm near-linear computational growth under sparse graph construction. The principal contribution of this work lies in the formalization of a transparent and extensible computational architecture for reproducible scientometric analysis. The proposed pipeline supports reliable cross-database integration and enables scalable knowledge-mapping applications in interdisciplinary research domains.</p>
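	<p>The diagnostics named above are standard concentration and diversity measures. The sketch below computes them over a toy degree distribution; the numbers are invented for illustration and are not the study's 317-publication network.</p>
	<pre><code># Toy sketch: HHI, Shannon entropy, and Gini coefficient over an
# assumed node-degree distribution (not the study's data).
import numpy as np

degrees = np.array([12, 7, 5, 3, 2, 2, 1, 1, 1, 1], dtype=float)
shares = degrees / degrees.sum()

hhi = np.sum(shares ** 2)                    # Herfindahl-Hirschman Index
entropy = -np.sum(shares * np.log2(shares))  # Shannon entropy (bits)

x = np.sort(degrees)                         # ascending for the Gini formula
n = len(x)
gini = 2 * np.sum(np.arange(1, n + 1) * x) / (n * x.sum()) - (n + 1) / n

print(f"HHI={hhi:.3f}  entropy={entropy:.3f} bits  Gini={gini:.3f}")
</code></pre>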
	]]></content:encoded>

	<dc:title>A Reproducible Computational Pipeline for Cross-Database Scientometric Network Construction: Architecture, Algorithms, and Structural Validation</dc:title>
			<dc:creator>Denny Moreno-Castro</dc:creator>
			<dc:creator>Omar Orlando Franco-Arias</dc:creator>
			<dc:creator>Cícero Pimenteira</dc:creator>
			<dc:creator>Nicolás Márquez</dc:creator>
			<dc:creator>Cristian Vidal-Silva</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040213</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>213</prism:startingPage>
		<prism:doi>10.3390/computers15040213</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/213</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/212">

	<title>Computers, Vol. 15, Pages 212: A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</title>
	<link>https://www.mdpi.com/2073-431X/15/4/212</link>
	<description>Heart failure management increasingly relies on heterogeneous clinical and real-world data generated through remote monitoring technologies. However, transforming these multimodal data streams into actionable insights requires robust interoperability infrastructures. This study presents the RETENTION interoperability framework, a federated HL7 Fast Healthcare Interoperability Resources (FHIR)-based architecture designed to support multi-site heart failure monitoring across five independent clinical environments. A semantic reference model comprising 444 clinical and contextual variables was developed and aligned with FHIR R4 resources and internationally recognised terminology systems. The platform adopts a selective profiling strategy, extending only the Patient resource while standardising the remaining variables through example-driven Implementation Guide documentation. Identifiable data are retained locally within Clinical Site Backends, whereas anonymised datasets are periodically aggregated into a Global Insights Cloud to enable centralised analytics and controlled third-party interactions. The framework was deployed across six hospitals (with two Spanish hospitals sharing the same deployment), supporting 390 patients and over 130,000 patient-days of monitoring, with more than 3.6 million remote device data points harmonised without schema conflicts. The results demonstrate that large-scale semantic harmonisation and privacy-preserving aggregation can be achieved using a lightweight profiling approach, providing a scalable and reproducible interoperability model for multi-centre digital health research infrastructures.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 212: A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/212">doi: 10.3390/computers15040212</a></p>
	<p>Authors:
		Nikolaos Vasileiou
		Olympia Giannakopoulou
		Ourania Manta
		Konstantinos Bromis
		Theodoros P. Vagenas
		Ioannis Kouris
		Maria Roumpi
		Lefteris Koumakis
		Yorgos Goletsis
		Maria Haritou
		George K. Matsopoulos
		Dimitris Fotiadis
		Dimitris D. Koutsouris
		</p>
	<p>Heart failure management increasingly relies on heterogeneous clinical and real-world data generated through remote monitoring technologies. However, transforming these multimodal data streams into actionable insights requires robust interoperability infrastructures. This study presents the RETENTION interoperability framework, a federated HL7 Fast Healthcare Interoperability Resources (FHIR)-based architecture designed to support multi-site heart failure monitoring across five independent clinical environments. A semantic reference model comprising 444 clinical and contextual variables was developed and aligned with FHIR R4 resources and internationally recognised terminology systems. The platform adopts a selective profiling strategy, extending only the Patient resource while standardising the remaining variables through example-driven Implementation Guide documentation. Identifiable data are retained locally within Clinical Site Backends, whereas anonymised datasets are periodically aggregated into a Global Insights Cloud to enable centralised analytics and controlled third-party interactions. The framework was deployed across six hospitals (with two Spanish hospitals sharing the same deployment), supporting 390 patients and over 130,000 patient-days of monitoring, with more than 3.6 million remote device data points harmonised without schema conflicts. The results demonstrate that large-scale semantic harmonisation and privacy-preserving aggregation can be achieved using a lightweight profiling approach, providing a scalable and reproducible interoperability model for multi-centre digital health research infrastructures.</p>
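	<p>For readers unfamiliar with HL7 FHIR R4, the sketch below assembles a minimal Observation resource of the kind such a platform exchanges for one remote-monitoring data point. The LOINC code, patient reference, and values are illustrative examples, not the RETENTION profiles.</p>
	<pre><code># Illustrative sketch: a minimal FHIR R4 Observation (heart rate).
# All identifiers and values are hypothetical.
import json

observation = {
    "resourceType": "Observation",
    "status": "final",
    "category": [{"coding": [{
        "system": "http://terminology.hl7.org/CodeSystem/observation-category",
        "code": "vital-signs"}]}],
    "code": {"coding": [{
        "system": "http://loinc.org",
        "code": "8867-4",            # LOINC: heart rate
        "display": "Heart rate"}]},
    "subject": {"reference": "Patient/example-hf-001"},
    "effectiveDateTime": "2026-03-31T08:00:00Z",
    "valueQuantity": {
        "value": 72,
        "unit": "beats/minute",
        "system": "http://unitsofmeasure.org",
        "code": "/min"},
}
print(json.dumps(observation, indent=2))
</code></pre>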
	]]></content:encoded>

	<dc:title>A Federated FHIR-Based Interoperability Framework for Multi-Site Heart Failure Monitoring: The RETENTION Project</dc:title>
			<dc:creator>Nikolaos Vasileiou</dc:creator>
			<dc:creator>Olympia Giannakopoulou</dc:creator>
			<dc:creator>Ourania Manta</dc:creator>
			<dc:creator>Konstantinos Bromis</dc:creator>
			<dc:creator>Theodoros P. Vagenas</dc:creator>
			<dc:creator>Ioannis Kouris</dc:creator>
			<dc:creator>Maria Roumpi</dc:creator>
			<dc:creator>Lefteris Koumakis</dc:creator>
			<dc:creator>Yorgos Goletsis</dc:creator>
			<dc:creator>Maria Haritou</dc:creator>
			<dc:creator>George K. Matsopoulos</dc:creator>
			<dc:creator>Dimitris Fotiadis</dc:creator>
			<dc:creator>Dimitris D. Koutsouris</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040212</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>212</prism:startingPage>
		<prism:doi>10.3390/computers15040212</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/212</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/211">

	<title>Computers, Vol. 15, Pages 211: Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</title>
	<link>https://www.mdpi.com/2073-431X/15/4/211</link>
	<description>Metaheuristic algorithms have become indispensable for solving high-dimensional, non-convex, and constrained optimization problems arising in science and engineering. However, no single method can simultaneously provide strong global exploration, accurate local exploitation, and robust performance across diverse problem classes. This paper proposes JADEFLO, a new hybrid nature-inspired metaheuristic that couples Adaptive Differential Evolution with Optional External Archive (JADE) and Frilled Lizard Optimization (FLO) in a two-stage search framework. In the first stage, JADE drives global exploration using p-best mutation, an external archive, and adaptive control of the mutation factor and crossover rate to maintain population diversity. In the second stage, FLO performs intensive local refinement by mimicking the hunting and tree-climbing behaviors of frilled lizards through dedicated exploration and exploitation moves. The resulting algorithm has linear time complexity with respect to the population size, dimensionality, and number of iterations. JADEFLO is evaluated on the IEEE CEC 2022 single-objective benchmark suite (F1&amp;ndash;F12) and three constrained engineering design problems (pressure vessel, tension/compression spring, and speed reducer), using 30 independent runs and comparisons against more than thirty state-of-the-art metaheuristics, including GA, PSO, DE variants, GWO, WOA, MFO, and FLO. The results show that JADEFLO attains the best overall rank on the CEC functions, delivers faster convergence and higher accuracy on most test cases, and matches or improves on the best-known designs with markedly reduced variance. These findings indicate that JADEFLO is a promising general-purpose optimizer and a flexible foundation for future extensions to multi-objective and large-scale optimization.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 211: Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/211">doi: 10.3390/computers15040211</a></p>
	<p>Authors:
		Hasan Kanaker
		Osama Al Sayaydeh
		Essam Alhroob
		Nader Abdel Karim
		Sami Smadi
		Nurul Halimatul Asmak Ismail
		</p>
	<p>Metaheuristic algorithms have become indispensable for solving high-dimensional, non-convex, and constrained optimization problems arising in science and engineering. However, no single method can simultaneously provide strong global exploration, accurate local exploitation, and robust performance across diverse problem classes. This paper proposes JADEFLO, a new hybrid nature-inspired metaheuristic that couples Adaptive Differential Evolution with Optional External Archive (JADE) and Frilled Lizard Optimization (FLO) in a two-stage search framework. In the first stage, JADE drives global exploration using p-best mutation, an external archive, and adaptive control of the mutation factor and crossover rate to maintain population diversity. In the second stage, FLO performs intensive local refinement by mimicking the hunting and tree-climbing behaviors of frilled lizards through dedicated exploration and exploitation moves. The resulting algorithm has linear time complexity with respect to the population size, dimensionality, and number of iterations. JADEFLO is evaluated on the IEEE CEC 2022 single-objective benchmark suite (F1&ndash;F12) and three constrained engineering design problems (pressure vessel, tension/compression spring, and speed reducer), using 30 independent runs and comparisons against more than thirty state-of-the-art metaheuristics, including GA, PSO, DE variants, GWO, WOA, MFO, and FLO. The results show that JADEFLO attains the best overall rank on the CEC functions, delivers faster convergence and higher accuracy on most test cases, and matches or improves on the best-known designs with markedly reduced variance. These findings indicate that JADEFLO is a promising general-purpose optimizer and a flexible foundation for future extensions to multi-objective and large-scale optimization.</p>
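	<p>The exploration stage's signature operator is JADE's DE/current-to-pbest/1 mutation. The sketch below applies it to a toy sphere function with a fixed mutation factor; JADE itself adapts F per individual and draws one difference-vector member from its external archive, and the FLO refinement stage is omitted here.</p>
	<pre><code># Sketch of DE/current-to-pbest/1 mutation on a toy problem
# (fixed F; JADE's parameter adaptation and archive are omitted).
import numpy as np

rng = np.random.default_rng(1)
NP, D, F, p = 20, 5, 0.5, 0.1
pop = rng.uniform(-5, 5, size=(NP, D))
fitness = np.sum(pop ** 2, axis=1)          # sphere function, minimized

# p-best pool: the best ceil(p*NP) individuals.
pbest_pool = np.argsort(fitness)[:max(1, int(np.ceil(p * NP)))]

mutants = np.empty_like(pop)
for i in range(NP):
    x_pbest = pop[rng.choice(pbest_pool)]
    r1, r2 = rng.choice(NP, size=2, replace=False)
    # v_i = x_i + F*(x_pbest - x_i) + F*(x_r1 - x_r2)
    mutants[i] = pop[i] + F * (x_pbest - pop[i]) + F * (pop[r1] - pop[r2])

print(mutants[:2])                          # crossover/selection would follow
</code></pre>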
	]]></content:encoded>

	<dc:title>Novel Hybrid Nature-Inspired Metaheuristic Algorithm for Global and Engineering Design Optimization</dc:title>
			<dc:creator>Hasan Kanaker</dc:creator>
			<dc:creator>Osama Al Sayaydeh</dc:creator>
			<dc:creator>Essam Alhroob</dc:creator>
			<dc:creator>Nader Abdel Karim</dc:creator>
			<dc:creator>Sami Smadi</dc:creator>
			<dc:creator>Nurul Halimatul Asmak Ismail</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040211</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>211</prism:startingPage>
		<prism:doi>10.3390/computers15040211</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/211</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/210">

	<title>Computers, Vol. 15, Pages 210: SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</title>
	<link>https://www.mdpi.com/2073-431X/15/4/210</link>
	<description>The rapid development of quantum computing poses severe threats to traditional blockchain security mechanisms, while existing full-quantum blockchains face challenges regarding high hardware costs and limited scalability. To address these issues, this paper proposes a secure and practical semi-quantum blockchain system. Specifically, a Semi-Quantum Delegated Proof of Stake consensus mechanism is constructed by integrating an adapted semi-quantum voting protocol with the Borda count method and a malicious behavior penalty model. Furthermore, a lightweight transaction verification framework is designed based on semi-quantum key distribution, enabling classical users with limited quantum capabilities to participate securely. Theoretical analysis demonstrates that the system achieves unconditional security against quantum attacks while maintaining high throughput. These results indicate that the proposed asymmetric resource design significantly lowers hardware barriers compared to full-quantum schemes, effectively balancing security, practicality, and cost-effectiveness for post-quantum blockchain networks.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 210: SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/210">doi: 10.3390/computers15040210</a></p>
	<p>Authors:
		Ang Liu
		Qi An
		Sijiang Xie
		Yalong Yan
		</p>
	<p>The rapid development of quantum computing poses severe threats to traditional blockchain security mechanisms, while existing full-quantum blockchains face challenges regarding high hardware costs and limited scalability. To address these issues, this paper proposes a secure and practical semi-quantum blockchain system. Specifically, a Semi-Quantum Delegated Proof of Stake consensus mechanism is constructed by integrating an adapted semi-quantum voting protocol with the Borda count method and a malicious behavior penalty model. Furthermore, a lightweight transaction verification framework is designed based on semi-quantum key distribution, enabling classical users with limited quantum capabilities to participate securely. Theoretical analysis demonstrates that the system achieves unconditional security against quantum attacks while maintaining high throughput. These results indicate that the proposed asymmetric resource design significantly lowers hardware barriers compared to full-quantum schemes, effectively balancing security, practicality, and cost-effectiveness for post-quantum blockchain networks.</p>
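	<p>The Borda count used by the consensus mechanism is a simple positional voting rule: each ballot awards a candidate points equal to the number of candidates ranked below it. A minimal tally over invented delegate ballots:</p>
	<pre><code># Minimal Borda-count tally; node names and ballots are hypothetical.
from collections import defaultdict

ballots = [
    ["nodeA", "nodeB", "nodeC"],   # most to least preferred
    ["nodeB", "nodeA", "nodeC"],
    ["nodeA", "nodeC", "nodeB"],
]

scores = defaultdict(int)
for ballot in ballots:
    n = len(ballot)
    for rank, candidate in enumerate(ballot):
        scores[candidate] += n - 1 - rank   # top rank earns n-1 points

print(sorted(scores, key=scores.get, reverse=True))  # ['nodeA', 'nodeB', 'nodeC']
</code></pre>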
	]]></content:encoded>

	<dc:title>SQDPoS: A Secure and Practical Semi-Quantum Blockchain System for the Post-Quantum Era</dc:title>
			<dc:creator>Ang Liu</dc:creator>
			<dc:creator>Qi An</dc:creator>
			<dc:creator>Sijiang Xie</dc:creator>
			<dc:creator>Yalong Yan</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040210</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>210</prism:startingPage>
		<prism:doi>10.3390/computers15040210</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/210</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/209">

	<title>Computers, Vol. 15, Pages 209: Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</title>
	<link>https://www.mdpi.com/2073-431X/15/4/209</link>
	<description>Hypertension is a leading global health risk and requires accurate and continuous monitoring for effective management. Although photoplethysmography (PPG) is a promising non-invasive modality for cuffless blood pressure (BP) assessment, many existing approaches (especially raw-signal deep learning) are vulnerable to data leakage, overfitting on small datasets, limited interpretability, and poor performance on minority BP stages. To address these limitations, we propose a robust and physiologically grounded framework for multi-class BP stage classification based on interpretable PPG features. Our approach centers on a comprehensive multi-domain feature engineering pipeline that extracts 124 PPG features, including demographic, morphological, functional decomposition, spectral, nonlinear dynamics, and clinical composite indices. We apply rigorous preprocessing and feature selection prior to model training. We validate the framework on two datasets: the PPG-BP dataset (657 segments, 4 classes) for benchmarking and PulseDB (283,773 segments, 3 classes) to assess scalability. We evaluate the proposed framework using a segment-level train/test split, appropriate for assessing intra-subject BP tracking after initial personalization. For the PulseDB dataset, this follows the protocol established by the dataset creators, while for the PPG-BP dataset, it enables direct comparison with prior work given practical dataset constraints. On PPG-BP, LightGBM trained on the selected features achieved macro-F1 = 0.78 and accuracy = 0.74, outperforming comparable deep-learning models. On PulseDB, a custom Residual MLP achieved accuracy = 0.81 and macro-F1 = 0.79, supporting generalization at scale. These results show that the proposed feature-based approach can outperform complex end-to-end deep-learning models on small datasets while providing improved interpretability. This work establishes a reliable and transparent pathway toward clinically viable continuous BP staging, moving beyond black-box models toward physiologically grounded decision support. Ablation analysis reveals that engineered features provide most of the predictive power (F1 = 0.911), while raw PPG features alone achieve modest performance (F1 = 0.384). For the minority hypertension stage 2 (HT-2) class, a bootstrap 95% confidence interval of [0.762, 1.000] is reported, reflecting uncertainty due to limited sample size.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 209: Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/209">doi: 10.3390/computers15040209</a></p>
	<p>Authors:
		Souhair Msokar
		Roman Davydov
		Vadim Davydov
		</p>
	<p>Hypertension is a leading global health risk and requires accurate and continuous monitoring for effective management. Although photoplethysmography (PPG) is a promising non-invasive modality for cuffless blood pressure (BP) assessment, many existing approaches (especially raw-signal deep learning) are vulnerable to data leakage, overfitting on small datasets, limited interpretability, and poor performance on minority BP stages. To address these limitations, we propose a robust and physiologically grounded framework for multi-class BP stage classification based on interpretable PPG features. Our approach centers on a comprehensive multi-domain feature engineering pipeline that extracts 124 PPG features, including demographic, morphological, functional decomposition, spectral, nonlinear dynamics, and clinical composite indices. We apply rigorous preprocessing and feature selection prior to model training. We validate the framework on two datasets: the PPG-BP dataset (657 segments, 4 classes) for benchmarking and PulseDB (283,773 segments, 3 classes) to assess scalability. We evaluate the proposed framework using a segment-level train/test split, appropriate for assessing intra-subject BP tracking after initial personalization. For the PulseDB dataset, this follows the protocol established by the dataset creators, while for the PPG-BP dataset, it enables direct comparison with prior work given practical dataset constraints. On PPG-BP, LightGBM trained on the selected features achieved macro-F1 = 0.78 and accuracy = 0.74, outperforming comparable deep-learning models. On PulseDB, a custom Residual MLP achieved accuracy = 0.81 and macro-F1 = 0.79, supporting generalization at scale. These results show that the proposed feature-based approach can outperform complex end-to-end deep-learning models on small datasets while providing improved interpretability. This work establishes a reliable and transparent pathway toward clinically viable continuous BP staging, moving beyond black-box models toward physiologically grounded decision support. Ablation analysis reveals that engineered features provide most of the predictive power (F1 = 0.911), while raw PPG features alone achieve modest performance (F1 = 0.384). For the minority hypertension stage 2 (HT-2) class, a bootstrap 95% confidence interval of [0.762, 1.000] is reported, reflecting uncertainty due to limited sample size.</p>
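	<p>To make the feature-engineering idea concrete, the sketch below derives a few morphological and spectral descriptors from a synthetic PPG segment. The signal, sampling rate, and feature choices are illustrative assumptions, not the paper's 124-feature pipeline.</p>
	<pre><code># Toy sketch: a handful of interpretable PPG features from a synthetic
# segment (stand-ins for the morphological/spectral families described).
import numpy as np
from scipy.signal import find_peaks

fs = 125                                   # assumed sampling rate (Hz)
t = np.arange(0, 10, 1 / fs)
rng = np.random.default_rng(0)
ppg = np.sin(2 * np.pi * 1.2 * t) + 0.1 * rng.normal(size=t.size)

peaks, _ = find_peaks(ppg, distance=fs // 2)
ibi = np.diff(peaks) / fs                  # inter-beat intervals (s)

spectrum = np.abs(np.fft.rfft(ppg)) ** 2
freqs = np.fft.rfftfreq(ppg.size, 1 / fs)

features = {
    "heart_rate_bpm": 60.0 / ibi.mean(),
    "ibi_sd_s": ibi.std(),                 # beat-to-beat variability
    "pulse_amplitude": ppg[peaks].mean(),
    "spectral_centroid_hz": (freqs * spectrum).sum() / spectrum.sum(),
}
print(features)
</code></pre>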
	]]></content:encoded>

	<dc:title>Interpretable Photoplethysmography Feature Engineering for Multi-Class Blood Pressure Staging</dc:title>
			<dc:creator>Souhair Msokar</dc:creator>
			<dc:creator>Roman Davydov</dc:creator>
			<dc:creator>Vadim Davydov</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040209</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>209</prism:startingPage>
		<prism:doi>10.3390/computers15040209</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/209</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2073-431X/15/4/208">

	<title>Computers, Vol. 15, Pages 208: Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</title>
	<link>https://www.mdpi.com/2073-431X/15/4/208</link>
	<description>Access control systems rely increasingly on multimodal biometric and behavioral signals to enhance security and robustness against sophisticated attacks. However, when heterogeneous modalities provide conflicting evidence, such as valid biometric credentials accompanied by abnormal behavioral or acoustic patterns, traditional fusion strategies based on static thresholds or majority voting often fail, leading to false alarms or insecure authorization decisions. This paper addresses this critical limitation by proposing a contextual decision-making fusion framework designed to resolve conflicting multimodal evidence at the decision-making level. The proposed approach models access control as a decision-making problem in a context of uncertainty, where independent agents generate modality-specific evidence from authentication channels based on face, voice, and fingerprints. A centralized fusion mechanism integrates heterogeneous results using adaptive reliability weighting and contextual reasoning to resolve conflicts before operational decisions are made. Rather than treating each modality independently, the framework explicitly considers inconsistencies, uncertainties, and situational context when aggregating evidence. The framework is evaluated using public benchmarks, including VGGFace2, VoxCeleb2, and FVC2004, combined with controlled multimodal scenarios that induce conflicting evidence. Experimental results obtained under controlled contradiction scenarios show that the proposed fusion strategy reduces false alarms and improves decision consistency by approximately 18%. These results are interpreted within the scope of controlled multimodal simulations.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Computers, Vol. 15, Pages 208: Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</b></p>
	<p>Computers <a href="https://www.mdpi.com/2073-431X/15/4/208">doi: 10.3390/computers15040208</a></p>
	<p>Authors:
		Yasser Hmimou
		Azedine Khiat
		Hassna Bensag
		Zineb Hidila
		Mohamed Tabaa
		</p>
	<p>Access control systems rely increasingly on multimodal biometric and behavioral signals to enhance security and robustness against sophisticated attacks. However, when heterogeneous modalities provide conflicting evidence, such as valid biometric credentials accompanied by abnormal behavioral or acoustic patterns, traditional fusion strategies based on static thresholds or majority voting often fail, leading to false alarms or insecure authorization decisions. This paper addresses this critical limitation by proposing a contextual decision-making fusion framework designed to resolve conflicting multimodal evidence at the decision-making level. The proposed approach models access control as a decision-making problem in a context of uncertainty, where independent agents generate modality-specific evidence from authentication channels based on face, voice, and fingerprints. A centralized fusion mechanism integrates heterogeneous results using adaptive reliability weighting and contextual reasoning to resolve conflicts before operational decisions are made. Rather than treating each modality independently, the framework explicitly considers inconsistencies, uncertainties, and situational context when aggregating evidence. The framework is evaluated using public benchmarks, including VGGFace2, VoxCeleb2, and FVC2004, combined with controlled multimodal scenarios that induce conflicting evidence. Experimental results obtained under controlled contradiction scenarios show that the proposed fusion strategy reduces false alarms and improves decision consistency by approximately 18%. These results are interpreted within the scope of controlled multimodal simulations.</p>
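	<p>A minimal sketch of the kind of reliability-weighted fusion with an explicit conflict check described above; all scores, weights, and thresholds here are hypothetical rather than the framework's calibrated values.</p>
	<pre><code># Sketch: reliability-weighted score fusion with a conflict check.
# Scores, weights, and thresholds are invented for illustration.
import numpy as np

# Match scores in [0, 1] from each modality for one access attempt.
scores = {"face": 0.92, "voice": 0.31, "fingerprint": 0.88}
# Reliability weights, e.g. derived from recent per-modality error rates.
weights = {"face": 0.40, "voice": 0.25, "fingerprint": 0.35}

vals = np.array([scores[m] for m in scores])
w = np.array([weights[m] for m in scores])

fused = float(vals @ w / w.sum())
conflict = float(vals.max() - vals.min())   # large spread = contradiction

if conflict > 0.5:
    decision = "escalate"   # contradictory evidence: defer to context/review
elif fused >= 0.7:
    decision = "grant"
else:
    decision = "deny"

print(fused, conflict, decision)            # here: conflict, so escalate
</code></pre>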
	]]></content:encoded>

	<dc:title>Context-Aware Decision Fusion for Multimodal Access Control Under Contradictory Biometric Evidence</dc:title>
			<dc:creator>Yasser Hmimou</dc:creator>
			<dc:creator>Azedine Khiat</dc:creator>
			<dc:creator>Hassna Bensag</dc:creator>
			<dc:creator>Zineb Hidila</dc:creator>
			<dc:creator>Mohamed Tabaa</dc:creator>
		<dc:identifier>doi: 10.3390/computers15040208</dc:identifier>
	<dc:source>Computers</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Computers</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>15</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>208</prism:startingPage>
		<prism:doi>10.3390/computers15040208</prism:doi>
	<prism:url>https://www.mdpi.com/2073-431X/15/4/208</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
