<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/informatics">
		<title>Informatics</title>
		<description>Latest open access articles published in Informatics at https://www.mdpi.com/journal/informatics</description>
		<link>https://www.mdpi.com/journal/informatics</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/informatics"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1778581344"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/72" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/71" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/70" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/69" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/68" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/67" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/66" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/5/65" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/64" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/63" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/62" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/61" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/60" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/59" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/58" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/57" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/56" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/55" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/54" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/53" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/4/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/3/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/2/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/13/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/141" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/140" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/139" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/138" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/137" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/136" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/135" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/134" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/133" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/132" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/131" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/130" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/129" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/128" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/127" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/126" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/125" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/124" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/123" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/122" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/121" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/120" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/119" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/118" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/117" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/116" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/115" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2227-9709/12/4/114" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/72">

	<title>Informatics, Vol. 13, Pages 72: TERA: A Trade-Off Evaluation and Resource-Aware Framework for Spam and Phishing Email Detection</title>
	<link>https://www.mdpi.com/2227-9709/13/5/72</link>
	<description>Email spam and phishing detection is typically evaluated using accuracy-centric metrics under implicitly unconstrained computational settings. However, in practical deployment scenarios—particularly in real-time and resource-constrained environments—models with comparable predictive performance may differ substantially in inference latency and resource usage, directly affecting their operational feasibility. This paper introduces TERA, a deployment-aware evaluation framework that formulates model assessment as a constraint-aware decision problem. Instead of aggregating performance and efficiency into a single objective, TERA treats predictive performance as a feasibility requirement that defines an admissible set of models. Within this feasible region, operational factors such as latency and resource usage are used to differentiate among candidates through structured, multi-dimensional analysis. Experiments on benchmark email datasets show that multiple models achieve comparable detection performance, forming a region of predictive equivalence. Within this region, significant variations in latency and resource consumption are observed, indicating that predictive equivalence does not imply deployment equivalence. These findings demonstrate that accuracy-based evaluation alone may provide limited guidance for deployment-oriented model selection. By explicitly separating feasibility constraints from preference-based trade-offs, TERA enables transparent and deployment-aligned model evaluation. The framework supports consistent comparison and selection among accuracy-comparable models without altering the role of detection effectiveness as a primary requirement, thereby complementing existing evaluation practices with a structured decision-oriented perspective.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 72: TERA: A Trade-Off Evaluation and Resource-Aware Framework for Spam and Phishing Email Detection</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/72">doi: 10.3390/informatics13050072</a></p>
	<p>Authors:
		Chanankorn Jandaeng
		Peeravit Koad
		Mohamad Fadli Zolkipli
		Jurairat Phuttharak
		</p>
	<p>Email spam and phishing detection is typically evaluated using accuracy-centric metrics under implicitly unconstrained computational settings. However, in practical deployment scenarios—particularly in real-time and resource-constrained environments—models with comparable predictive performance may differ substantially in inference latency and resource usage, directly affecting their operational feasibility. This paper introduces TERA, a deployment-aware evaluation framework that formulates model assessment as a constraint-aware decision problem. Instead of aggregating performance and efficiency into a single objective, TERA treats predictive performance as a feasibility requirement that defines an admissible set of models. Within this feasible region, operational factors such as latency and resource usage are used to differentiate among candidates through structured, multi-dimensional analysis. Experiments on benchmark email datasets show that multiple models achieve comparable detection performance, forming a region of predictive equivalence. Within this region, significant variations in latency and resource consumption are observed, indicating that predictive equivalence does not imply deployment equivalence. These findings demonstrate that accuracy-based evaluation alone may provide limited guidance for deployment-oriented model selection. By explicitly separating feasibility constraints from preference-based trade-offs, TERA enables transparent and deployment-aligned model evaluation. The framework supports consistent comparison and selection among accuracy-comparable models without altering the role of detection effectiveness as a primary requirement, thereby complementing existing evaluation practices with a structured decision-oriented perspective.</p>
	]]></content:encoded>

	<dc:title>TERA: A Trade-Off Evaluation and Resource-Aware Framework for Spam and Phishing Email Detection</dc:title>
			<dc:creator>Chanankorn Jandaeng</dc:creator>
			<dc:creator>Peeravit Koad</dc:creator>
			<dc:creator>Mohamad Fadli Zolkipli</dc:creator>
			<dc:creator>Jurairat Phuttharak</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050072</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>72</prism:startingPage>
		<prism:doi>10.3390/informatics13050072</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/72</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/71">

	<title>Informatics, Vol. 13, Pages 71: Enhancing the Efficiency of Blockchain Verification Through Resource-Weighted Node Selection</title>
	<link>https://www.mdpi.com/2227-9709/13/5/71</link>
	<description>Blockchain technology has emerged as a foundational paradigm for building decentralized, transparent, and secure systems, particularly in environments that operate without centralized authority. At the core of these systems are consensus mechanisms that ensure transaction validity and maintain trust among distributed participants. However, the efficiency of a blockchain network is strongly influenced by how verifier (or validator) nodes are selected, particularly in sharded architectures where transaction processing is distributed across multiple shards. A critical challenge in blockchain design is selecting appropriate nodes for transaction verification in a manner that is efficient, fair, and resilient to adversarial behavior, while also minimizing communication overhead. Existing approaches often rely primarily on resource availability or on the ability to create blocks, particularly in sharded blockchain architectures. Building on these ideas, this paper proposes a Resource Weighted–Block Score selection algorithm, which integrates a node’s block score with its computational resource availability to guide verifier node selection. Simulation-based evaluation demonstrates that the proposed approach significantly reduces transaction verification latency and improves overall node utilization, thereby enhancing network performance and scalability in sharded blockchain systems.</description>
	<pubDate>2026-05-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 71: Enhancing the Efficiency of Blockchain Verification Through Resource-Weighted Node Selection</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/71">doi: 10.3390/informatics13050071</a></p>
	<p>Authors:
		Vedika Jorika
		Nagaratna Medishetty
		</p>
	<p>Blockchain technology has emerged as a foundational paradigm for building decentralized, transparent, and secure systems, particularly in environments that operate without centralized authority. At the core of these systems are consensus mechanisms that ensure transaction validity and maintain trust among distributed participants. However, the efficiency of a blockchain network is strongly influenced by how verifier (or validator) nodes are selected, particularly in sharded architectures where transaction processing is distributed across multiple shards. A critical challenge in blockchain design is selecting appropriate nodes for transaction verification in a manner that is efficient, fair, and resilient to adversarial behavior, while also minimizing communication overhead. Existing approaches often rely primarily on resource availability or on the ability to create blocks, particularly in sharded blockchain architectures. Building on these ideas, this paper proposes a Resource Weighted–Block Score selection algorithm, which integrates a node’s block score with its computational resource availability to guide verifier node selection. Simulation-based evaluation demonstrates that the proposed approach significantly reduces transaction verification latency and improves overall node utilization, thereby enhancing network performance and scalability in sharded blockchain systems.</p>
	]]></content:encoded>

	<dc:title>Enhancing the Efficiency of Blockchain Verification Through Resource-Weighted Node Selection</dc:title>
			<dc:creator>Vedika Jorika</dc:creator>
			<dc:creator>Nagaratna Medishetty</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050071</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-05-08</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-05-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>71</prism:startingPage>
		<prism:doi>10.3390/informatics13050071</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/71</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/70">

	<title>Informatics, Vol. 13, Pages 70: Cross-Lingual Transfer of Named Entity Markup with Large Language Models</title>
	<link>https://www.mdpi.com/2227-9709/13/5/70</link>
	<description>This paper investigates the problem of cross-lingual named entity recognition (NER), which involves automatically identifying entities such as persons, organizations, locations, and other structured elements in text. High-quality NER typically requires manually annotated corpora; however, for many low-resource languages, such data are scarce and costly to produce. The study addresses the following question: can annotated sentences in one language be used to transfer NER markup to their machine-translated counterparts in other languages? To explore this, we propose an approach based on a large language model (LLM) that performs two tasks simultaneously: translating a source sentence and generating BIOES-formatted entity tags for the translated output. To improve robustness and reduce semantic drift, a back-translation step is incorporated to verify meaning preservation by comparing the reconstructed source sentence with the original. The proposed method is compared with two baseline approaches: (1) annotation projection via machine translation and (2) automatic tagging using pre-existing NER tools. Performance is evaluated using standard metrics, including precision, recall, and F1-score. Experimental results demonstrate that the LLM-based approach provides a practical and efficient mechanism for transferring NER annotations across languages. While the method achieves strong and balanced performance, its quality remains influenced by translation accuracy and adherence to annotation constraints. Methodologically, the approach can be considered relatively language-independent, as it relies on general LLM capabilities, a universal tagging scheme, and multilingual semantic representations rather than language-specific model training.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 70: Cross-Lingual Transfer of Named Entity Markup with Large Language Models</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/70">doi: 10.3390/informatics13050070</a></p>
	<p>Authors:
		Vladimir Barakhnin
		Rustam Mussabayev
		Davlatyor Mengliev
		Alexander Krassovitskiy
		Alymzhan Toleu
		Daniil Lyutaev
		Iskander Akhmetov
		Bahodir Ibragimov
		</p>
	<p>This paper investigates the problem of cross-lingual named entity recognition (NER), which involves automatically identifying entities such as persons, organizations, locations, and other structured elements in text. High-quality NER typically requires manually annotated corpora; however, for many low-resource languages, such data are scarce and costly to produce. The study addresses the following question: can annotated sentences in one language be used to transfer NER markup to their machine-translated counterparts in other languages? To explore this, we propose an approach based on a large language model (LLM) that performs two tasks simultaneously: translating a source sentence and generating BIOES-formatted entity tags for the translated output. To improve robustness and reduce semantic drift, a back-translation step is incorporated to verify meaning preservation by comparing the reconstructed source sentence with the original. The proposed method is compared with two baseline approaches: (1) annotation projection via machine translation and (2) automatic tagging using pre-existing NER tools. Performance is evaluated using standard metrics, including precision, recall, and F1-score. Experimental results demonstrate that the LLM-based approach provides a practical and efficient mechanism for transferring NER annotations across languages. While the method achieves strong and balanced performance, its quality remains influenced by translation accuracy and adherence to annotation constraints. Methodologically, the approach can be considered relatively language-independent, as it relies on general LLM capabilities, a universal tagging scheme, and multilingual semantic representations rather than language-specific model training.</p>
	]]></content:encoded>

	<dc:title>Cross-Lingual Transfer of Named Entity Markup with Large Language Models</dc:title>
			<dc:creator>Vladimir Barakhnin</dc:creator>
			<dc:creator>Rustam Mussabayev</dc:creator>
			<dc:creator>Davlatyor Mengliev</dc:creator>
			<dc:creator>Alexander Krassovitskiy</dc:creator>
			<dc:creator>Alymzhan Toleu</dc:creator>
			<dc:creator>Daniil Lyutaev</dc:creator>
			<dc:creator>Iskander Akhmetov</dc:creator>
			<dc:creator>Bahodir Ibragimov</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050070</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>70</prism:startingPage>
		<prism:doi>10.3390/informatics13050070</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/70</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/69">

	<title>Informatics, Vol. 13, Pages 69: A Lightweight Hybrid CNN–CBAM Model for Multistage Acute Lymphoblastic Leukemia Classification from Peripheral Blood Smear Images</title>
	<link>https://www.mdpi.com/2227-9709/13/5/69</link>
	<description>Accurate and efficient classification of hematological malignancies from peripheral blood smear (PBS) images remains challenging due to the scarcity of annotated datasets, staining variability, and subtle morphological differences among blood cancer subtypes. To address these limitations, this study proposes an Advanced Lightweight Deep Learning (ALDL) framework for the multi-class classification of Acute Lymphoblastic Leukemia (ALL) across four clinically significant stages: Benign, Pro-B, Pre-B, and Early Pre-B. The framework integrates EfficientNetV2-S with Convolutional Block Attention Modules (CBAM) to enhance spatial and channel-wise feature refinement. At the same time, Focal Loss is employed to mitigate class imbalance by prioritizing hard-to-classify samples. A robust preprocessing pipeline, including CLAHE contrast enhancement, Reinhard stain normalization, and data augmentation, improves feature visibility and dataset generalization. Lesion segmentation is performed using RGB-based thresholding and watershed overlay, followed by lesion-level cropping to ensure consistency across inputs. Experimental evaluations on the ALL-DB dataset demonstrate the superior performance of the proposed method, achieving an average accuracy of 96.11%, an F1-score of 95.99%, and an AUC of 0.9875. Comparative analyses against MobileNetV3, ResNet50, DenseNet121, VGG16, and InceptionV3 confirm that the proposed segmentation-guided EfficientNetV2-S + CBAM + Focal Loss framework consistently outperforms conventional CNN architectures across both 70:30 and 60:40 train–test splits. Furthermore, a detailed investigation of color spaces (RGB, HSV, LAB, and HED) indicates that RGB yields the most reliable segmentation and classification results. At the same time, HED enhances lesion visualization at the expense of higher computational cost. 
The proposed ALDL framework demonstrates strong potential for real-world application as a computer-aided diagnostic (CAD) system for early leukemia detection, offering improved diagnostic reliability, reduced error rates, and practical scalability for clinical environments.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 69: A Lightweight Hybrid CNN–CBAM Model for Multistage Acute Lymphoblastic Leukemia Classification from Peripheral Blood Smear Images</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/69">doi: 10.3390/informatics13050069</a></p>
	<p>Authors:
		Kittipol Wisaeng
		</p>
	<p>Accurate and efficient classification of hematological malignancies from peripheral blood smear (PBS) images remains challenging due to the scarcity of annotated datasets, staining variability, and subtle morphological differences among blood cancer subtypes. To address these limitations, this study proposes an Advanced Lightweight Deep Learning (ALDL) framework for the multi-class classification of Acute Lymphoblastic Leukemia (ALL) across four clinically significant stages: Benign, Pro-B, Pre-B, and Early Pre-B. The framework integrates EfficientNetV2-S with Convolutional Block Attention Modules (CBAM) to enhance spatial and channel-wise feature refinement. At the same time, Focal Loss is employed to mitigate class imbalance by prioritizing hard-to-classify samples. A robust preprocessing pipeline, including CLAHE contrast enhancement, Reinhard stain normalization, and data augmentation, improves feature visibility and dataset generalization. Lesion segmentation is performed using RGB-based thresholding and watershed overlay, followed by lesion-level cropping to ensure consistency across inputs. Experimental evaluations on the ALL-DB dataset demonstrate the superior performance of the proposed method, achieving an average accuracy of 96.11%, an F1-score of 95.99%, and an AUC of 0.9875. Comparative analyses against MobileNetV3, ResNet50, DenseNet121, VGG16, and InceptionV3 confirm that the proposed segmentation-guided EfficientNetV2-S + CBAM + Focal Loss framework consistently outperforms conventional CNN architectures across both 70:30 and 60:40 train–test splits. Furthermore, a detailed investigation of color spaces (RGB, HSV, LAB, and HED) indicates that RGB yields the most reliable segmentation and classification results. At the same time, HED enhances lesion visualization at the expense of higher computational cost. 
The proposed ALDL framework demonstrates strong potential for real-world application as a computer-aided diagnostic (CAD) system for early leukemia detection, offering improved diagnostic reliability, reduced error rates, and practical scalability for clinical environments.</p>
	]]></content:encoded>

	<dc:title>A Lightweight Hybrid CNN–CBAM Model for Multistage Acute Lymphoblastic Leukemia Classification from Peripheral Blood Smear Images</dc:title>
			<dc:creator>Kittipol Wisaeng</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050069</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>69</prism:startingPage>
		<prism:doi>10.3390/informatics13050069</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/69</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/68">

	<title>Informatics, Vol. 13, Pages 68: Beverage Stain Classification Using Hyperspectral Imaging with an L-BFGS-B-Optimized Autoencoder and a Channel-Attention 1D CNN</title>
	<link>https://www.mdpi.com/2227-9709/13/5/68</link>
	<description>Hyperspectral imaging (HSI) provides rich spectral information and serves as a non-destructive technique for forensic stain analysis. Conventional approaches often exhibit degraded performance due to the high dimensionality and spectral redundancy inherent in hyperspectral data. To address this challenge, a hyperspectral dataset comprising nine beverage stains—papaya, coffee, pomegranate, orange, tea, wine, whisky, rum, and brandy—is developed. Building on this dataset, an ensemble framework that combines an optimized autoencoder (AE), channel-attention (CA)-enhanced one-dimensional convolutional neural networks (1D CNNs), and a Limited Memory Broyden–Fletcher–Goldfarb–Shanno (L-BFGS-B)-based weighted fusion strategy is proposed. The autoencoder learns compact latent representations from the 204-band hyperspectral vectors, reducing redundancy while preserving discriminative spectral features. CA emphasizes informative spectral bands and improves stain separability. Multiple 1D CNN models are trained using different latent dimensionalities, and their class probability outputs are fused through an optimized L-BFGS-B weighting scheme, where higher-performing models contribute more strongly to the final decision. Experimental results demonstrate classification accuracies of 96.54%, 97.19%, and 97.86% for the AE32 CA, AE64 CA, and AE128 CA models, respectively, with the optimized ensemble achieving an accuracy of 98.28%. Additionally, the time-dependent evolution of beverage stain reflectance is systematically analyzed using overlapped, normalized reflectance signatures acquired at time intervals of 0 min, 1 h, 2 h, 3 h, 4 h, and 5 h. The results confirm that AE-based latent compression, CA, and L-BFGS-B optimized ensemble fusion enhance hyperspectral beverage stain classification, providing an effective and extensible framework for forensic trace evidence analysis.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 68: Beverage Stain Classification Using Hyperspectral Imaging with an L-BFGS-B-Optimized Autoencoder and a Channel-Attention 1D CNN</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/68">doi: 10.3390/informatics13050068</a></p>
	<p>Authors:
		Jitendra Shit
		Muzaffar Ahmad Dar
		Manikandan V M
		Partha Pratim Roy
		</p>
	<p>Hyperspectral imaging (HSI) provides rich spectral information and serves as a non-destructive technique for forensic stain analysis. Conventional approaches often exhibit degraded performance due to the high dimensionality and spectral redundancy inherent in hyperspectral data. To address this challenge, a hyperspectral dataset comprising nine beverage stains&amp;amp;mdash;papaya, coffee, pomegranate, orange, tea, wine, whisky, rum, and brandy&amp;amp;mdash;is developed. Building on this dataset, an ensemble framework that combines an optimized autoencoder (AE), channel-attention (CA)-enhanced one-dimensional convolutional neural networks (1D CNNs), and a Limited Memory Broyden&amp;amp;ndash;Fletcher&amp;amp;ndash;Goldfarb&amp;amp;ndash;Shanno (L-BFGS-B)-based weighted fusion strategy is proposed. The autoencoder learns compact latent representations from the 204-band hyperspectral vectors, reducing redundancy while preserving discriminative spectral features. CA emphasizes informative spectral bands and improves stain separability. Multiple 1D CNN models are trained using different latent dimensionalities, and their class probability outputs are fused through an optimized L-BFGS-B weighting scheme, where higher-performing models contribute more strongly to the final decision. Experimental results demonstrate classification accuracies of 96.54%, 97.19%, and 97.86% for the AE32 CA, AE64 CA, and AE128 CA models, respectively, with the optimized ensemble achieving an accuracy of 98.28%. Additionally, the time-dependent evolution of beverage stain reflectance is systematically analyzed using overlapped, normalized reflectance signatures acquired at time intervals of 0 min, 1 h, 2 h, 3 h, 4 h, and 5 h. The results confirm that AE-based latent compression, CA, and L-BFGS-B optimized ensemble fusion enhance hyperspectral beverage stain classification, providing an effective and extensible framework for forensic trace evidence analysis.</p>
	]]></content:encoded>

	<dc:title>Beverage Stain Classification Using Hyperspectral Imaging with an L-BFGS-B-Optimized Autoencoder and a Channel-Attention 1D CNN</dc:title>
			<dc:creator>Jitendra Shit</dc:creator>
			<dc:creator>Muzaffar Ahmad Dar</dc:creator>
			<dc:creator>Manikandan V M</dc:creator>
			<dc:creator>Partha Pratim Roy</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050068</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>68</prism:startingPage>
		<prism:doi>10.3390/informatics13050068</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/68</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/67">

	<title>Informatics, Vol. 13, Pages 67: The Evolution of Artificial Intelligence in Marketing: A Bibliometric Analysis of Three Decades (1992&amp;ndash;2025)</title>
	<link>https://www.mdpi.com/2227-9709/13/5/67</link>
	<description>Over the past three decades, artificial intelligence (AI) has substantially reshaped marketing research and practice, yet the discipline has not established a systematic understanding of its evolutionary trajectory and intellectual structure. A bibliometric analysis of 1923 Scopus publications (1992&amp;amp;ndash;2025) was conducted using CiteSpace to explore collaboration patterns, conceptual development, and thematic organization. It identified six evolutionary stages with accelerating innovation cycles, starting with neural networks (1992&amp;amp;ndash;2000) and ending with generative AI (2024&amp;amp;ndash;2025), with research attention per stage compressing from approximately 9 years to just 2 years. The analysis of the collaboration network shows that the key contributors are India, China, the USA, and the UK. Co-citation analysis indicates that there are three thematic dimensions with seven clusters, namely: (i) AI technological foundations and capabilities, (ii) AI marketing applications and transformation, and (iii) responsible AI governance and ethics. It suggests a Three-Force Evolutionary Framework, which combines technology-push, market-pull, and governance-moderator forces to describe the dynamics of the field. This framework shows that the Regulatory Awakening of 2018 (e.g., GDPR and the Cambridge Analytica incident) guided, not limited, innovation, and highlighted the critical personalization&amp;amp;ndash;privacy paradox on which modern developments are based. It identifies three priority research directions: generative AI in creative marketing, consumer trust in the personalization&amp;amp;ndash;privacy paradox, and organizational adaptation to fast innovation cycles. This study provides scholars with a comprehensive knowledge map, practitioners with strategic imperatives for responsible AI adoption, and policymakers with evidence that well-designed regulation accelerates innovation by balancing commercial value with societal concerns.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 67: The Evolution of Artificial Intelligence in Marketing: A Bibliometric Analysis of Three Decades (1992&amp;ndash;2025)</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/67">doi: 10.3390/informatics13050067</a></p>
	<p>Authors:
		Weiming Wang
		Zijia Li
		</p>
	<p>Over the past three decades, artificial intelligence (AI) has substantially reshaped marketing research and practice, yet the discipline has not established a systematic understanding of its evolutionary trajectory and intellectual structure. A bibliometric analysis of 1923 Scopus publications (1992&amp;amp;ndash;2025) was conducted using CiteSpace to explore collaboration patterns, conceptual development, and thematic organization. It identified six evolutionary stages with accelerating innovation cycles, starting with neural networks (1992&amp;amp;ndash;2000) and ending with generative AI (2024&amp;amp;ndash;2025), with research attention per stage compressing from approximately 9 years to just 2 years. The analysis of the collaboration network shows that the key contributors are India, China, the USA, and the UK. Co-citation analysis indicates that there are three thematic dimensions with seven clusters, namely: (i) AI technological foundations and capabilities, (ii) AI marketing applications and transformation, and (iii) responsible AI governance and ethics. It suggests a Three-Force Evolutionary Framework, which combines technology-push, market-pull, and governance-moderator forces to describe the dynamics of the field. This framework shows that the Regulatory Awakening of 2018 (e.g., GDPR and the Cambridge Analytica incident) guided, not limited, innovation, and highlighted the critical personalization&amp;amp;ndash;privacy paradox on which modern developments are based. It identifies three priority research directions: generative AI in creative marketing, consumer trust in the personalization&amp;amp;ndash;privacy paradox, and organizational adaptation to fast innovation cycles. This study provides scholars with a comprehensive knowledge map, practitioners with strategic imperatives for responsible AI adoption, and policymakers with evidence that well-designed regulation accelerates innovation by balancing commercial value with societal concerns.</p>
	]]></content:encoded>

	<dc:title>The Evolution of Artificial Intelligence in Marketing: A Bibliometric Analysis of Three Decades (1992&amp;ndash;2025)</dc:title>
			<dc:creator>Weiming Wang</dc:creator>
			<dc:creator>Zijia Li</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050067</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>67</prism:startingPage>
		<prism:doi>10.3390/informatics13050067</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/67</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/66">

	<title>Informatics, Vol. 13, Pages 66: Intelligent Question-Answering System for New Energy Vehicles Integrating Deep Semantic Parsing and Knowledge Graphs</title>
	<link>https://www.mdpi.com/2227-9709/13/5/66</link>
	<description>The new energy vehicle (NEV) industry generates massive multi-source heterogeneous data. To overcome traditional database limitations in terminology disambiguation and multi-hop reasoning, this paper proposes a knowledge graph (KG)-based question-answering (QA) architecture. Three primary domain challenges are addressed: First, to tackle the poor semantic extraction of informal diagnostic texts, a deep semantic parsing network (BERT-BiLSTM-CRF) is integrated to extract high-precision knowledge from 150,000 real-world maintenance records. Second, to solve topological redundancy, the Labeled Property Graph (LPG) specification is employed to encapsulate parameters of 2157 vehicle models as internal attributes, significantly streamlining complex multi-hop reasoning. Finally, to enhance limited reasoning capabilities, an intent classification module (TextCNN) automatically translates natural language into graph queries, enabling deep fault tracing across up to five semantic levels. Experimental results demonstrate 98% and 93% accuracy in entity-relation recognition and intent classification, respectively. The resulting KG (8274 nodes, 14,488 edges) establishes a scalable paradigm for intelligent diagnostic reasoning in complex vertical domains.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 66: Intelligent Question-Answering System for New Energy Vehicles Integrating Deep Semantic Parsing and Knowledge Graphs</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/66">doi: 10.3390/informatics13050066</a></p>
	<p>Authors:
		Yaqi Wu
		Pengcheng Li
		Tong Geng
		Yi Wang
		Haiyu Zhang
		Shixiong Li
		</p>
	<p>The new energy vehicle (NEV) industry generates massive multi-source heterogeneous data. To overcome traditional database limitations in terminology disambiguation and multi-hop reasoning, this paper proposes a knowledge graph (KG)-based question-answering (QA) architecture. Three primary domain challenges are addressed: First, to tackle the poor semantic extraction of informal diagnostic texts, a deep semantic parsing network (BERT-BiLSTM-CRF) is integrated to extract high-precision knowledge from 150,000 real-world maintenance records. Second, to solve topological redundancy, the Labeled Property Graph (LPG) specification is employed to encapsulate parameters of 2157 vehicle models as internal attributes, significantly streamlining complex multi-hop reasoning. Finally, to enhance limited reasoning capabilities, an intent classification module (TextCNN) automatically translates natural language into graph queries, enabling deep fault tracing across up to five semantic levels. Experimental results demonstrate 98% and 93% accuracy in entity-relation recognition and intent classification, respectively. The resulting KG (8274 nodes, 14,488 edges) establishes a scalable paradigm for intelligent diagnostic reasoning in complex vertical domains.</p>
	]]></content:encoded>

	<dc:title>Intelligent Question-Answering System for New Energy Vehicles Integrating Deep Semantic Parsing and Knowledge Graphs</dc:title>
			<dc:creator>Yaqi Wu</dc:creator>
			<dc:creator>Pengcheng Li</dc:creator>
			<dc:creator>Tong Geng</dc:creator>
			<dc:creator>Yi Wang</dc:creator>
			<dc:creator>Haiyu Zhang</dc:creator>
			<dc:creator>Shixiong Li</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050066</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>66</prism:startingPage>
		<prism:doi>10.3390/informatics13050066</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/66</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/5/65">

	<title>Informatics, Vol. 13, Pages 65: The Relevance of Compound Events in Bee Traffic Monitoring</title>
	<link>https://www.mdpi.com/2227-9709/13/5/65</link>
	<description>Bees are essential pollinators for agricultural systems, making accurate, automated monitoring of their behavior critical for assessing colony health and ecosystem stability. Recent advances in computer vision and artificial intelligence have enabled large-scale bee traffic monitoring at hive entrances; however, most existing event classification methods focus exclusively on simple entrance and exit events. This simplification overlooks compound movements&amp;amp;mdash;such as U-turns and guarding behaviors&amp;amp;mdash;that represent a substantial portion of bee activity and can lead to inaccurate trajectory reconstruction and misleading behavioral interpretations. In this work, we systematically analyze existing event classification strategies used in automatic bee traffic monitoring, evaluating their performance on both simple and compound movements. We then propose extended classification methods that explicitly model compound events by incorporating bidirectional movement patterns derived from positional and angular cues. Using a manually annotated dataset of computer-vision-based hive entrance recordings, we compare threshold-based, displacement-based, and angle-based approaches under simple and mixed-event conditions. Our results demonstrate that compound events account for over one-third of all detected movements and that classification methods explicitly designed to handle bidirectional behavior substantially outperform traditional approaches in both accuracy and robustness. In particular, threshold-based bidirectional classification achieves near-perfect performance when full trajectories are available, while displacement-based methods provide a reliable alternative under partial observations. These findings highlight the importance of modeling compound behaviors in automated bee monitoring systems and contribute to more accurate flight reconstruction, behavioral analysis, and AI-driven decision support for precision agriculture and pollinator management.</description>
	<pubDate>2026-04-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 65: The Relevance of Compound Events in Bee Traffic Monitoring</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/5/65">doi: 10.3390/informatics13050065</a></p>
	<p>Authors:
		Andrea Nieves-Rivera
		Marie Lluberes-Contreras
		Rémi Mégret
		</p>
	<p>Bees are essential pollinators for agricultural systems, making accurate, automated monitoring of their behavior critical for assessing colony health and ecosystem stability. Recent advances in computer vision and artificial intelligence have enabled large-scale bee traffic monitoring at hive entrances; however, most existing event classification methods focus exclusively on simple entrance and exit events. This simplification overlooks compound movements&amp;amp;mdash;such as U-turns and guarding behaviors&amp;amp;mdash;that represent a substantial portion of bee activity and can lead to inaccurate trajectory reconstruction and misleading behavioral interpretations. In this work, we systematically analyze existing event classification strategies used in automatic bee traffic monitoring, evaluating their performance on both simple and compound movements. We then propose extended classification methods that explicitly model compound events by incorporating bidirectional movement patterns derived from positional and angular cues. Using a manually annotated dataset of computer-vision-based hive entrance recordings, we compare threshold-based, displacement-based, and angle-based approaches under simple and mixed-event conditions. Our results demonstrate that compound events account for over one-third of all detected movements and that classification methods explicitly designed to handle bidirectional behavior substantially outperform traditional approaches in both accuracy and robustness. In particular, threshold-based bidirectional classification achieves near-perfect performance when full trajectories are available, while displacement-based methods provide a reliable alternative under partial observations. 
These findings highlight the importance of modeling compound behaviors in automated bee monitoring systems and contribute to more accurate flight reconstruction, behavioral analysis, and AI-driven decision support for precision agriculture and pollinator management.</p>
	]]></content:encoded>

	<dc:title>The Relevance of Compound Events in Bee Traffic Monitoring</dc:title>
			<dc:creator>Andrea Nieves-Rivera</dc:creator>
			<dc:creator>Marie Lluberes-Contreras</dc:creator>
			<dc:creator>Rémi Mégret</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13050065</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-23</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-23</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>65</prism:startingPage>
		<prism:doi>10.3390/informatics13050065</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/5/65</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/64">

	<title>Informatics, Vol. 13, Pages 64: The Role of Human&amp;ndash;Computer Interaction in Shaping User Engagement with E-Commerce Applications</title>
	<link>https://www.mdpi.com/2227-9709/13/4/64</link>
	<description>This research aimed to determine the influence of human&amp;amp;ndash;computer interaction usability on the behavioral intention and self-reported continued usage intentions of e-commerce applications. Moreover, it investigated the moderating role of trust in the relationship between behavioral intention and self-reported continued usage intentions of e-commerce applications. The data were gathered from 398 Bahraini individuals using a convenience sampling approach and analyzed using SmartPLS 4. The results highlighted that human&amp;amp;ndash;computer interaction usability sub-characteristics, including appropriateness, recognizability, user interface esthetics, learnability, and operability, are significantly associated with behavioral intention toward e-commerce applications within this sample. Furthermore, the results reported that trust strengthens the influence of behavioral intention on self-reported continued usage intentions toward e-commerce applications. The research provides context-specific exploratory insights from a segment of the Bahraini e-commerce sector. Due to the study&amp;amp;rsquo;s non-probabilistic convenience sampling design, the cross-sectional nature of the data, and a sample predominantly composed of young, male, English-proficient respondents, the findings should be interpreted as exploratory rather than representative of the entire Bahraini population. In addition, the research findings helped e-commerce application developers and marketing experts within e-commerce companies develop efficient, operable, attractive, and learnable applications.</description>
	<pubDate>2026-04-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 64: The Role of Human&amp;ndash;Computer Interaction in Shaping User Engagement with E-Commerce Applications</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/64">doi: 10.3390/informatics13040064</a></p>
	<p>Authors:
		Hasan Razzaqi
		Mahmood Akbar
		Jayendira P. Sankar
		T. Ramayah
		</p>
	<p>This research aimed to determine the influence of human&amp;amp;ndash;computer interaction usability on the behavioral intention and self-reported continued usage intentions of e-commerce applications. Moreover, it investigated the moderating role of trust in the relationship between behavioral intention and self-reported continued usage intentions of e-commerce applications. The data were gathered from 398 Bahraini individuals using a convenience sampling approach and analyzed using SmartPLS 4. The results highlighted that human&amp;amp;ndash;computer interaction usability sub-characteristics, including appropriateness, recognizability, user interface esthetics, learnability, and operability, are significantly associated with behavioral intention toward e-commerce applications within this sample. Furthermore, the results reported that trust strengthens the influence of behavioral intention on self-reported continued usage intentions toward e-commerce applications. The research provides context-specific exploratory insights from a segment of the Bahraini e-commerce sector. Due to the study&amp;amp;rsquo;s non-probabilistic convenience sampling design, the cross-sectional nature of the data, and a sample predominantly composed of young, male, English-proficient respondents, the findings should be interpreted as exploratory rather than representative of the entire Bahraini population. In addition, the research findings helped e-commerce application developers and marketing experts within e-commerce companies develop efficient, operable, attractive, and learnable applications.</p>
	]]></content:encoded>

	<dc:title>The Role of Human&amp;ndash;Computer Interaction in Shaping User Engagement with E-Commerce Applications</dc:title>
			<dc:creator>Hasan Razzaqi</dc:creator>
			<dc:creator>Mahmood Akbar</dc:creator>
			<dc:creator>Jayendira P. Sankar</dc:creator>
			<dc:creator>T. Ramayah</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040064</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-20</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>64</prism:startingPage>
		<prism:doi>10.3390/informatics13040064</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/64</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/63">

	<title>Informatics, Vol. 13, Pages 63: SPARK_AI: A Prompt-Orchestrated Architecture for Stateful, Process-Oriented Reasoning with Large Language Models</title>
	<link>https://www.mdpi.com/2227-9709/13/4/63</link>
	<description>This paper presents SPARK_AI, a prompt-orchestrated system architecture for governing how large language models (LLMs) conduct structured and adaptive reasoning in human&amp;amp;ndash;AI interaction. The framework mitigates ad hoc LLM use by replacing direct answer generation with a process-oriented, step-by-step reasoning workflow. We focus on SPARK_AI_MATH, a domain module that supports learners in solving non-routine problem-solving tasks by operationalizing well-established problem-solving phases and guided questioning dialog strategies (Socratic-style prompts), with an optional tool-mediated visualization layer (e.g., GeoGebra). The module implements a five-phase conversational protocol consisting of problem interpretation, analysis of givens, planning, execution, and reflection, together with a controlled hint policy. This design is realized through a stateful system architecture in which each problem instance is maintained as an independent interaction track with a persistent reasoning state. User acceptance was evaluated by first-year mechanical engineering students (N = 108) using an expanded Technology Acceptance Model instrument, and the results were analyzed via PLS-SEM. The findings indicate overall favorable perceptions, with perceived usefulness and learning support emerging as key predictors of intention for continued use. Beyond this specific domain, the SPARK_AI framework enables efficient domain adaptation through localized prompt strategies while preserving a shared cognitive control layer for reasoning-centered human&amp;amp;ndash;LLM interaction.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 63: SPARK_AI: A Prompt-Orchestrated Architecture for Stateful, Process-Oriented Reasoning with Large Language Models</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/63">doi: 10.3390/informatics13040063</a></p>
	<p>Authors:
		Marija Kaplar
		Sebastijan Kaplar
		Miloš Vučić
		Lidija Ivanović
		Aleksandra Stevanović
		Aleksandar Milenković
		Nemanja Vučićević
		</p>
	<p>This paper presents SPARK_AI, a prompt-orchestrated system architecture for governing how large language models (LLMs) conduct structured and adaptive reasoning in human&amp;amp;ndash;AI interaction. The framework mitigates ad hoc LLM use by replacing direct answer generation with a process-oriented, step-by-step reasoning workflow. We focus on SPARK_AI_MATH, a domain module that supports learners in solving non-routine problem-solving tasks by operationalizing well-established problem-solving phases and guided questioning dialog strategies (Socratic-style prompts), with an optional tool-mediated visualization layer (e.g., GeoGebra). The module implements a five-phase conversational protocol consisting of problem interpretation, analysis of givens, planning, execution, and reflection, together with a controlled hint policy. This design is realized through a stateful system architecture in which each problem instance is maintained as an independent interaction track with a persistent reasoning state. User acceptance was evaluated by first-year mechanical engineering students (N = 108) using an expanded Technology Acceptance Model instrument, and the results were analyzed via PLS-SEM. The findings indicate overall favorable perceptions, with perceived usefulness and learning support emerging as key predictors of intention for continued use. Beyond this specific domain, the SPARK_AI framework enables efficient domain adaptation through localized prompt strategies while preserving a shared cognitive control layer for reasoning-centered human&amp;amp;ndash;LLM interaction.</p>
	]]></content:encoded>

	<dc:title>SPARK_AI: A Prompt-Orchestrated Architecture for Stateful, Process-Oriented Reasoning with Large Language Models</dc:title>
			<dc:creator>Marija Kaplar</dc:creator>
			<dc:creator>Sebastijan Kaplar</dc:creator>
			<dc:creator>Miloš Vučić</dc:creator>
			<dc:creator>Lidija Ivanović</dc:creator>
			<dc:creator>Aleksandra Stevanović</dc:creator>
			<dc:creator>Aleksandar Milenković</dc:creator>
			<dc:creator>Nemanja Vučićević</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040063</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>63</prism:startingPage>
		<prism:doi>10.3390/informatics13040063</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/63</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/62">

	<title>Informatics, Vol. 13, Pages 62: Collaborative Multi-Agent Method for Zero-Shot LLM-Generated Text Detection</title>
	<link>https://www.mdpi.com/2227-9709/13/4/62</link>
	<description>With the rapid proliferation of large language models (LLMs), distinguishing machine-generated text from human-authored content has become increasingly critical for ensuring content authenticity, academic integrity, and trust in information systems. However, detecting text generated by LLMs remains a challenging problem, particularly in zero-shot settings where labeled data and domain-specific tuning are unavailable. To address this challenge, in this paper, we propose a novel Collaborative Multi-Agent Zero-Shot Detection framework (CMA-ZSD). In contrast to existing methods based on watermarking, statistical heuristics, or neural classifiers, our CMA-ZSD employs three functionally heterogeneous agents that perform differentiated perturbations of the input text. By jointly modeling semantic consistency, grammatical normalization, and feature-level reconstruction, our method captures intrinsic asymmetries between human-authored and LLM-generated text. A semantic similarity evaluation mechanism, combined with majority voting, enables robust and interpretable detection decisions that balance individual agent autonomy with collective consensus. Extensive experiments across 11 domains demonstrate the effectiveness of our method, with its zero-shot detection achieving accuracy comparable to domain-finetuned models in specific domains such as Finance and Reddit-dli5.</description>
	<pubDate>2026-04-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 62: Collaborative Multi-Agent Method for Zero-Shot LLM-Generated Text Detection</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/62">doi: 10.3390/informatics13040062</a></p>
	<p>Authors:
		Gang Sun
		Bowen Li
		Ying Zhou
		Yi Zhu
		Jipeng Qiang
		</p>
	<p>With the rapid proliferation of large language models (LLMs), distinguishing machine-generated text from human-authored content has become increasingly critical for ensuring content authenticity, academic integrity, and trust in information systems. However, detecting text generated by LLMs remains a challenging problem, particularly in zero-shot settings where labeled data and domain-specific tuning are unavailable. To address this challenge, in this paper, we propose a novel Collaborative Multi-Agent Zero-Shot Detection framework (CMA-ZSD). In contrast to existing methods based on watermarking, statistical heuristics, or neural classifiers, our CMA-ZSD employs three functionally heterogeneous agents that perform differentiated perturbations of the input text. By jointly modeling semantic consistency, grammatical normalization, and feature-level reconstruction, our method captures intrinsic asymmetries between human-authored and LLM-generated text. A semantic similarity evaluation mechanism, combined with majority voting, enables robust and interpretable detection decisions that balance individual agent autonomy with collective consensus. Extensive experiments across 11 domains demonstrate the effectiveness of our method, with its zero-shot detection achieving accuracy comparable to domain-finetuned models in specific domains such as Finance and Reddit-dli5.</p>
	]]></content:encoded>

	<dc:title>Collaborative Multi-Agent Method for Zero-Shot LLM-Generated Text Detection</dc:title>
			<dc:creator>Gang Sun</dc:creator>
			<dc:creator>Bowen Li</dc:creator>
			<dc:creator>Ying Zhou</dc:creator>
			<dc:creator>Yi Zhu</dc:creator>
			<dc:creator>Jipeng Qiang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040062</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-16</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-16</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>62</prism:startingPage>
		<prism:doi>10.3390/informatics13040062</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/62</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/61">

	<title>Informatics, Vol. 13, Pages 61: On the Implementations of the BiTemporal RDF Model: An Experimental Approach</title>
	<link>https://www.mdpi.com/2227-9709/13/4/61</link>
	<description>The BiTemporal RDF (BiTRDF) model extends the standard RDF data model by integrating both valid time and transaction time, thus enabling the representation and querying of dynamic and historical knowledge. While the theoretical foundations of BiTRDF have been established, practical implementation strategies have not yet been systematically studied. This paper bridges this gap by exploring six alternative approaches to implementing BiTRDF, combining object-oriented programming and database-oriented designs using Python and PostgreSQL. We evaluate these approaches using six synthetic datasets ranging from 0.5 million to 16 million bitemporal triples. The evaluation focuses on memory consumption, data-loading time, and query performance as data load increases. The results show that all approaches perform comparably when the knowledge store fits in memory. As the dataset size grows beyond available RAM, database-oriented implementations achieve substantially better loading and query performance, while object-oriented implementations offer greater flexibility and extensibility. These findings demonstrate the feasibility of implementing BiTRDF using existing technologies and provide practical guidance for selecting appropriate implementation strategies based on data size, performance requirements, and extensibility needs.</description>
	<pubDate>2026-04-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 61: On the Implementations of the BiTemporal RDF Model: An Experimental Approach</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/61">doi: 10.3390/informatics13040061</a></p>
	<p>Authors:
		Di Wu
		Hsien-Tseng Wang
		Abdullah Uz Tansel
		</p>
	<p>The BiTemporal RDF (BiTRDF) model extends the standard RDF data model by integrating both valid time and transaction time, thus enabling the representation and querying of dynamic and historical knowledge. While the theoretical foundations of BiTRDF have been established, practical implementation strategies have not yet been systematically studied. This paper bridges this gap by exploring six alternative approaches to implementing BiTRDF, combining object-oriented programming and database-oriented designs using Python and PostgreSQL. We evaluate these approaches using six synthetic datasets ranging from 0.5 million to 16 million bitemporal triples. The evaluation focuses on memory consumption, data-loading time, and query performance as data load increases. The results show that all approaches perform comparably when the knowledge store fits in memory. As the dataset size grows beyond available RAM, database-oriented implementations achieve substantially better loading and query performance, while object-oriented implementations offer greater flexibility and extensibility. These findings demonstrate the feasibility of implementing BiTRDF using existing technologies and provide practical guidance for selecting appropriate implementation strategies based on data size, performance requirements, and extensibility needs.</p>
	]]></content:encoded>

	<dc:title>On the Implementations of the BiTemporal RDF Model: An Experimental Approach</dc:title>
			<dc:creator>Di Wu</dc:creator>
			<dc:creator>Hsien-Tseng Wang</dc:creator>
			<dc:creator>Abdullah Uz Tansel</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040061</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-15</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-15</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>61</prism:startingPage>
		<prism:doi>10.3390/informatics13040061</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/61</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/60">

	<title>Informatics, Vol. 13, Pages 60: GRU-Based Beam Pattern Synthesis for Optimized Uniform Linear Antenna Arrays</title>
	<link>https://www.mdpi.com/2227-9709/13/4/60</link>
	<description>This study presents a deep learning-based framework for beam pattern synthesis in optimized uniform linear antenna arrays, combining Differential Evolution–based pre-optimization with recurrent neural network (RNN) modeling. Radiation patterns are first generated to satisfy sidelobe suppression and directivity constraints and are then used to train recurrent models that learn the mapping between radiation patterns and complex excitation parameters. A formal mathematical formulation of the Simple RNN, Gated Recurrent Unit (GRU), and Long Short-Term Memory (LSTM) architectures is provided, together with a per–time-step computational cost analysis based on dominant matrix–vector multiplications. A comparative evaluation under identical training conditions shows that gated architectures significantly outperform the standard RNN. Although the LSTM achieves the lowest prediction errors, the GRU attains comparable performance with reduced structural complexity. Beam pattern synthesis experiments for unseen steering directions demonstrate accurate reconstruction of main lobe alignment, sidelobe levels (approximately −12 to −13 dB), and directivity values close to 8 dB. The floating-point operations (FLOPs) analysis indicates that the GRU requires fewer dominant operations per time step than the LSTM, potentially reducing computational cost and energy consumption in resource-constrained beamforming applications.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 60: GRU-Based Beam Pattern Synthesis for Optimized Uniform Linear Antenna Arrays</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/60">doi: 10.3390/informatics13040060</a></p>
	<p>Authors:
		Armando Arce
		Fernando Arce
		Enrique Stevens-Navarro
		Ulises Pineda-Rico
		Mohammad Reza Rahmati
		Abel García-Barrientos
		</p>
	<p>This study presents a deep learning-based framework for beam pattern synthesis in optimized uniform linear antenna arrays, combining Differential Evolution–based pre-optimization with recurrent neural network (RNN) modeling. Radiation patterns are first generated to satisfy sidelobe suppression and directivity constraints and are then used to train recurrent models that learn the mapping between radiation patterns and complex excitation parameters. A formal mathematical formulation of the Simple RNN, Gated Recurrent Unit (GRU), and Long Short-Term Memory (LSTM) architectures is provided, together with a per–time-step computational cost analysis based on dominant matrix–vector multiplications. A comparative evaluation under identical training conditions shows that gated architectures significantly outperform the standard RNN. Although the LSTM achieves the lowest prediction errors, the GRU attains comparable performance with reduced structural complexity. Beam pattern synthesis experiments for unseen steering directions demonstrate accurate reconstruction of main lobe alignment, sidelobe levels (approximately −12 to −13 dB), and directivity values close to 8 dB. The floating-point operations (FLOPs) analysis indicates that the GRU requires fewer dominant operations per time step than the LSTM, potentially reducing computational cost and energy consumption in resource-constrained beamforming applications.</p>
	]]></content:encoded>

	<dc:title>GRU-Based Beam Pattern Synthesis for Optimized Uniform Linear Antenna Arrays</dc:title>
			<dc:creator>Armando Arce</dc:creator>
			<dc:creator>Fernando Arce</dc:creator>
			<dc:creator>Enrique Stevens-Navarro</dc:creator>
			<dc:creator>Ulises Pineda-Rico</dc:creator>
			<dc:creator>Mohammad Reza Rahmati</dc:creator>
			<dc:creator>Abel García-Barrientos</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040060</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>60</prism:startingPage>
		<prism:doi>10.3390/informatics13040060</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/60</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/59">

	<title>Informatics, Vol. 13, Pages 59: Enabling Inclusive Access to Restricted Sacred Spaces: A Real-World Comparison of VR360 and AI-Driven Virtual Reality</title>
	<link>https://www.mdpi.com/2227-9709/13/4/59</link>
	<description>This study investigates how virtual reality systems can support inclusive access to culturally restricted sacred heritage sites. Two extended reality (XR) approaches were developed and deployed in a real-world setting: a VR360 virtual tour and an AI-driven immersive virtual reality prototype with conversational interaction. A research-in-the-wild, between-subjects study was conducted with 136 participants using mixed methods, including standardized questionnaires (System Usability Scale, User Engagement Scale, and Igroup Presence Questionnaire), retrospective interviews, and exhibition staff observations. The results reveal clear trade-offs between the two systems. The VR360 system demonstrated higher usability and operational reliability, requiring minimal supervision and technical resources, whereas the AI-driven immersive VR system supported embodied exploration and conversational inquiry, which was associated with higher spatial presence and helped visitors address questions during exploration. Qualitative findings further indicate that conversational interaction enhanced user experience but also introduced greater technical complexity and staffing requirements. Overall, the study provides empirical insights for designing and deploying XR systems in heritage contexts and highlights how different levels of immersion and interaction influence usability, presence, and operational feasibility when supporting inclusive access to culturally restricted sites.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 59: Enabling Inclusive Access to Restricted Sacred Spaces: A Real-World Comparison of VR360 and AI-Driven Virtual Reality</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/59">doi: 10.3390/informatics13040059</a></p>
	<p>Authors:
		Phimphakan Thongthip
		Darin Poollapalin
		Songpon Khanchai
		Pakinee Ariya
		Phichete Julrode
		</p>
	<p>This study investigates how virtual reality systems can support inclusive access to culturally restricted sacred heritage sites. Two extended reality (XR) approaches were developed and deployed in a real-world setting: a VR360 virtual tour and an AI-driven immersive virtual reality prototype with conversational interaction. A research-in-the-wild, between-subjects study was conducted with 136 participants using mixed methods, including standardized questionnaires (System Usability Scale, User Engagement Scale, and Igroup Presence Questionnaire), retrospective interviews, and exhibition staff observations. The results reveal clear trade-offs between the two systems. The VR360 system demonstrated higher usability and operational reliability, requiring minimal supervision and technical resources, whereas the AI-driven immersive VR system supported embodied exploration and conversational inquiry, which was associated with higher spatial presence and helped visitors address questions during exploration. Qualitative findings further indicate that conversational interaction enhanced user experience but also introduced greater technical complexity and staffing requirements. Overall, the study provides empirical insights for designing and deploying XR systems in heritage contexts and highlights how different levels of immersion and interaction influence usability, presence, and operational feasibility when supporting inclusive access to culturally restricted sites.</p>
	]]></content:encoded>

	<dc:title>Enabling Inclusive Access to Restricted Sacred Spaces: A Real-World Comparison of VR360 and AI-Driven Virtual Reality</dc:title>
			<dc:creator>Phimphakan Thongthip</dc:creator>
			<dc:creator>Darin Poollapalin</dc:creator>
			<dc:creator>Songpon Khanchai</dc:creator>
			<dc:creator>Pakinee Ariya</dc:creator>
			<dc:creator>Phichete Julrode</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040059</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>59</prism:startingPage>
		<prism:doi>10.3390/informatics13040059</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/59</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/58">

	<title>Informatics, Vol. 13, Pages 58: Augmented Reality as a Tool for 5G Learning: Interactive Visualization of NSA/SA Architectures and Network Components</title>
	<link>https://www.mdpi.com/2227-9709/13/4/58</link>
	<description>The rapid advancement of digital and mobile technologies has reshaped the educational landscape, fostering the adoption of interactive and learner-centered methodologies. Among these, immersive technologies such as Augmented Reality (AR), when coupled with next-generation wireless communication systems, hold the potential to revolutionize knowledge acquisition and student engagement. In this paper, we present the design and development of an AR-based educational tool specifically oriented to teaching concepts of fifth-generation (5G) mobile networks. The tool provides a real-time interactive visualization of 3D network components on mobile devices, enabling learners to explore 5G NSA/SA architectures in an accessible manner with real-world environments through mobile devices and their integrated cameras. The application was developed using Blender for 3D modeling and Unity as the rendering engine, incorporating the Vuforia SDK for marker-based AR tracking, and it was deployed on the Android operating system. Unlike traditional static approaches, the proposed solution enables learners to explore complex network architectures and key functionalities of 5G in an interactive and accessible manner. To assess its perceived effectiveness, quantitative surveys were conducted with both university and high school students, focusing on usability, engagement, and perceived learning outcomes. Results indicate that the tool is user-friendly, enhances motivation, and supports conceptual understanding as perceived by participants of 5G technologies. These findings highlight the potential of AR, supported by advanced wireless networks, as a pedagogical strategy to improve STEM education and foster technological literacy in the era of digital transformation.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 58: Augmented Reality as a Tool for 5G Learning: Interactive Visualization of NSA/SA Architectures and Network Components</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/58">doi: 10.3390/informatics13040058</a></p>
	<p>Authors:
		Nathaly Orozco Garzón
		David Herrera
		Angel Gomez
		Pablo Plaza
		Henry Carvajal Mora
		Roberto Sánchez Albán
		José Vega-Sánchez
		Paola Vinueza-Naranjo
		</p>
	<p>The rapid advancement of digital and mobile technologies has reshaped the educational landscape, fostering the adoption of interactive and learner-centered methodologies. Among these, immersive technologies such as Augmented Reality (AR), when coupled with next-generation wireless communication systems, hold the potential to revolutionize knowledge acquisition and student engagement. In this paper, we present the design and development of an AR-based educational tool specifically oriented to teaching concepts of fifth-generation (5G) mobile networks. The tool provides a real-time interactive visualization of 3D network components on mobile devices, enabling learners to explore 5G NSA/SA architectures in an accessible manner with real-world environments through mobile devices and their integrated cameras. The application was developed using Blender for 3D modeling and Unity as the rendering engine, incorporating the Vuforia SDK for marker-based AR tracking, and it was deployed on the Android operating system. Unlike traditional static approaches, the proposed solution enables learners to explore complex network architectures and key functionalities of 5G in an interactive and accessible manner. To assess its perceived effectiveness, quantitative surveys were conducted with both university and high school students, focusing on usability, engagement, and perceived learning outcomes. Results indicate that the tool is user-friendly, enhances motivation, and supports conceptual understanding as perceived by participants of 5G technologies. These findings highlight the potential of AR, supported by advanced wireless networks, as a pedagogical strategy to improve STEM education and foster technological literacy in the era of digital transformation.</p>
	]]></content:encoded>

	<dc:title>Augmented Reality as a Tool for 5G Learning: Interactive Visualization of NSA/SA Architectures and Network Components</dc:title>
			<dc:creator>Nathaly Orozco Garzón</dc:creator>
			<dc:creator>David Herrera</dc:creator>
			<dc:creator>Angel Gomez</dc:creator>
			<dc:creator>Pablo Plaza</dc:creator>
			<dc:creator>Henry Carvajal Mora</dc:creator>
			<dc:creator>Roberto Sánchez Albán</dc:creator>
			<dc:creator>José Vega-Sánchez</dc:creator>
			<dc:creator>Paola Vinueza-Naranjo</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040058</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>58</prism:startingPage>
		<prism:doi>10.3390/informatics13040058</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/58</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/57">

	<title>Informatics, Vol. 13, Pages 57: A Multimodal Vision: Language Framework for Intelligent Detection and Semantic Interpretation of Urban Waste</title>
	<link>https://www.mdpi.com/2227-9709/13/4/57</link>
	<description>Urban waste management remains a significant challenge for achieving environmental sustainability and advancing smart city infrastructures. This study proposes a multimodal vision–language framework that integrates real-time object detection with automated semantic interpretation and structured semantic analysis for intelligent urban waste monitoring. A custom dataset including 2247 manually annotated images was constructed from publicly available sources (TrashNet and TACO), enabling robust multi-class detection across six waste categories. Two state-of-the-art object detection models, YOLOv8m and YOLOv10m, were trained and evaluated using a fixed 70/15/15 train–validation–test split. Under this configuration, YOLOv8m achieved a mAP@50 of 90.5% and a mAP@50–95 of 87.1%, slightly outperforming YOLOv10m (89.5% and 86.0%, respectively). Moreover, YOLOv8m demonstrated superior inference efficiency, reaching 120 FPS compared to 105 FPS for YOLOv10m. To obtain a more reliable estimate of performance stability across data partitions, stratified 5-Fold Cross-Validation was conducted. YOLOv8m achieved an average Precision of 0.9324 and an average mAP@50–95 of 0.9315 ± 0.0575 across folds, suggesting generally stable performance across data partitions, while also revealing variability associated with dataset heterogeneity. Beyond object detection, the framework integrates MiniGPT-4 to generate context-aware textual descriptions of detected waste items, thereby enhancing semantic interpretability and user engagement. Furthermore, GPT-5 Vision is incorporated as a structured auxiliary semantic classification and category-suggestion module that analyzes object crops and multi-class scenes, producing constrained JSON-formatted outputs that include category labels, concise descriptions, and recyclability indicators. 
Overall, the proposed YOLOv8–MiniGPT-4–GPT-5 Vision pipeline shows that combining accurate real-time detection with multimodal semantic reasoning can improve interpretability and support interactive, semantically enriched waste analysis in smart-city and environmental monitoring scenarios.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 57: A Multimodal Vision: Language Framework for Intelligent Detection and Semantic Interpretation of Urban Waste</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/57">doi: 10.3390/informatics13040057</a></p>
	<p>Authors:
		Verda Misimi Jonuzi
		Igor Mishkovski
		</p>
	<p>Urban waste management remains a significant challenge for achieving environmental sustainability and advancing smart city infrastructures. This study proposes a multimodal vision–language framework that integrates real-time object detection with automated semantic interpretation and structured semantic analysis for intelligent urban waste monitoring. A custom dataset including 2247 manually annotated images was constructed from publicly available sources (TrashNet and TACO), enabling robust multi-class detection across six waste categories. Two state-of-the-art object detection models, YOLOv8m and YOLOv10m, were trained and evaluated using a fixed 70/15/15 train–validation–test split. Under this configuration, YOLOv8m achieved a mAP@50 of 90.5% and a mAP@50–95 of 87.1%, slightly outperforming YOLOv10m (89.5% and 86.0%, respectively). Moreover, YOLOv8m demonstrated superior inference efficiency, reaching 120 FPS compared to 105 FPS for YOLOv10m. To obtain a more reliable estimate of performance stability across data partitions, stratified 5-Fold Cross-Validation was conducted. YOLOv8m achieved an average Precision of 0.9324 and an average mAP@50–95 of 0.9315 ± 0.0575 across folds, suggesting generally stable performance across data partitions, while also revealing variability associated with dataset heterogeneity. Beyond object detection, the framework integrates MiniGPT-4 to generate context-aware textual descriptions of detected waste items, thereby enhancing semantic interpretability and user engagement. Furthermore, GPT-5 Vision is incorporated as a structured auxiliary semantic classification and category-suggestion module that analyzes object crops and multi-class scenes, producing constrained JSON-formatted outputs that include category labels, concise descriptions, and recyclability indicators. 
Overall, the proposed YOLOv8–MiniGPT-4–GPT-5 Vision pipeline shows that combining accurate real-time detection with multimodal semantic reasoning can improve interpretability and support interactive, semantically enriched waste analysis in smart-city and environmental monitoring scenarios.</p>
	]]></content:encoded>

	<dc:title>A Multimodal Vision: Language Framework for Intelligent Detection and Semantic Interpretation of Urban Waste</dc:title>
			<dc:creator>Verda Misimi Jonuzi</dc:creator>
			<dc:creator>Igor Mishkovski</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040057</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>57</prism:startingPage>
		<prism:doi>10.3390/informatics13040057</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/57</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/56">

	<title>Informatics, Vol. 13, Pages 56: Assessment Validity in the Age of Generative AI: A Natural Experiment</title>
	<link>https://www.mdpi.com/2227-9709/13/4/56</link>
	<description>Universities play a dual role as sites of learning and as institutions that certify student competence through assessment. The rapid diffusion of generative artificial intelligence (GenAI) challenges this certification function by altering the conditions under which assessment evidence is produced. When powerful AI tools are widely available, grades may increasingly reflect a combination of individual understanding and external cognitive support rather than solely independent competence. This study examines how changes in assessment format interact with GenAI availability to reshape observable performance outcomes in higher education. Using exam grade data from a compulsory undergraduate course delivered over five years (2021–2025; N = 1066), the study exploits a naturally occurring change in assessment conditions as a natural experiment. From 2021 to 2024, the course was assessed using an AI-permissive take-home examination, while in 2025 the assessment shifted to an AI-restricted, supervised in-person examination. Course content, intended learning outcomes, grading criteria, examiner continuity, and the structural design of the examination tasks remained stable across cohorts. The results reveal a pronounced shift in grade distributions coinciding with the format change. Failure rates increased sharply in 2025, mid-range grades declined, and the proportion of top grades remained largely unchanged. Statistical analysis indicates a significant association between examination period and grade outcomes (χ2(5, N = 1066) = 60.62, p &lt; 0.001), with a small-to-moderate effect size (Cramér's V = 0.24), driven primarily by the increase in failing grades. These findings suggest that AI-permissive and AI-restricted assessment formats may not be measurement-equivalent under conditions of widespread GenAI use. 
The results raise concerns about construct validity and the credibility of grades as signals of independent competence, while also highlighting tensions between certification credibility and assessment authenticity.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 56: Assessment Validity in the Age of Generative AI: A Natural Experiment</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/56">doi: 10.3390/informatics13040056</a></p>
	<p>Authors:
		Håvar Brattli
		Alexander Utne
		Matthew Lynch
		</p>
	<p>Universities play a dual role as sites of learning and as institutions that certify student competence through assessment. The rapid diffusion of generative artificial intelligence (GenAI) challenges this certification function by altering the conditions under which assessment evidence is produced. When powerful AI tools are widely available, grades may increasingly reflect a combination of individual understanding and external cognitive support rather than solely independent competence. This study examines how changes in assessment format interact with GenAI availability to reshape observable performance outcomes in higher education. Using exam grade data from a compulsory undergraduate course delivered over five years (2021–2025; N = 1066), the study exploits a naturally occurring change in assessment conditions as a natural experiment. From 2021 to 2024, the course was assessed using an AI-permissive take-home examination, while in 2025 the assessment shifted to an AI-restricted, supervised in-person examination. Course content, intended learning outcomes, grading criteria, examiner continuity, and the structural design of the examination tasks remained stable across cohorts. The results reveal a pronounced shift in grade distributions coinciding with the format change. Failure rates increased sharply in 2025, mid-range grades declined, and the proportion of top grades remained largely unchanged. Statistical analysis indicates a significant association between examination period and grade outcomes (χ2(5, N = 1066) = 60.62, p < 0.001), with a small-to-moderate effect size (Cramér's V = 0.24), driven primarily by the increase in failing grades. These findings suggest that AI-permissive and AI-restricted assessment formats may not be measurement-equivalent under conditions of widespread GenAI use. 
The results raise concerns about construct validity and the credibility of grades as signals of independent competence, while also highlighting tensions between certification credibility and assessment authenticity.</p>
	]]></content:encoded>

	<dc:title>Assessment Validity in the Age of Generative AI: A Natural Experiment</dc:title>
			<dc:creator>Håvar Brattli</dc:creator>
			<dc:creator>Alexander Utne</dc:creator>
			<dc:creator>Matthew Lynch</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040056</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>56</prism:startingPage>
		<prism:doi>10.3390/informatics13040056</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/56</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/55">

	<title>Informatics, Vol. 13, Pages 55: Toward Network-Managed 5G Fixed Wireless Access: Technologies, Challenges, and Future Directions</title>
	<link>https://www.mdpi.com/2227-9709/13/4/55</link>
	<description>The increasing digitalization of industrial ecosystems under the Industrial Revolution 4.0 has intensified the demand for fast, reliable, and inclusive broadband connectivity. The expansion of 5G technology led by data-driven services addresses the growing demand for high-capacity, low-latency communication through Fixed Wireless Access (FWA) as a cost-effective broadband solution. FWA is a wireless broadband access technology that provides high-speed connectivity to fixed locations using 5G New Radio (NR) infrastructure instead of physical fiber networks, while reducing deployment time and infrastructure investment. This review examines the technical challenges, economic business implications, and comparative performance of 5G FWA relative to other broadband technologies. It also examines the implementation of Enhanced Telecom Operations Map (eTOM) in several telecommunication network functions. The analysis indicates that successful 5G FWA implementation requires not only technical optimization, but also the adoption of standardized, scalable, and AI-driven network management practices. Emphasis is placed on the role of the eTOM as a structured framework for aligning technical, operational, and organizational processes in FWA deployment. This review highlights how eTOM can support readiness assessment, process harmonization, and lifecycle management to ensure consistent and efficient service delivery. This study provides a comprehensive reference for researchers and industry stakeholders in developing sustainable and future-ready 5G FWA networks.</description>
	<pubDate>2026-04-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 55: Toward Network-Managed 5G Fixed Wireless Access: Technologies, Challenges, and Future Directions</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/55">doi: 10.3390/informatics13040055</a></p>
	<p>Authors:
		Asri Wulandari
		Muhammad Suryanegara
		Dadang Gunawan
		</p>
	<p>The increasing digitalization of industrial ecosystems under the Industrial Revolution 4.0 has intensified the demand for fast, reliable, and inclusive broadband connectivity. The expansion of 5G technology led by data-driven services addresses the growing demand for high-capacity, low-latency communication through Fixed Wireless Access (FWA) as a cost-effective broadband solution. FWA is a wireless broadband access technology that provides high-speed connectivity to fixed locations using 5G New Radio (NR) infrastructure instead of physical fiber networks, while reducing deployment time and infrastructure investment. This review examines the technical challenges, economic business implications, and comparative performance of 5G FWA relative to other broadband technologies. It also examines the implementation of Enhanced Telecom Operations Map (eTOM) in several telecommunication network functions. The analysis indicates that successful 5G FWA implementation requires not only technical optimization, but also the adoption of standardized, scalable, and AI-driven network management practices. Emphasis is placed on the role of the eTOM as a structured framework for aligning technical, operational, and organizational processes in FWA deployment. This review highlights how eTOM can support readiness assessment, process harmonization, and lifecycle management to ensure consistent and efficient service delivery. This study provides a comprehensive reference for researchers and industry stakeholders in developing sustainable and future-ready 5G FWA networks.</p>
	]]></content:encoded>

	<dc:title>Toward Network-Managed 5G Fixed Wireless Access: Technologies, Challenges, and Future Directions</dc:title>
			<dc:creator>Asri Wulandari</dc:creator>
			<dc:creator>Muhammad Suryanegara</dc:creator>
			<dc:creator>Dadang Gunawan</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040055</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-03</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/informatics13040055</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/55</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/54">

	<title>Informatics, Vol. 13, Pages 54: Cybersecurity Challenges in Hospitals: International Incident Reports Analysis and Expert Validation</title>
	<link>https://www.mdpi.com/2227-9709/13/4/54</link>
	<description>The healthcare sector is undergoing a digital transformation that improves the quality of care, increases efficiency, and enhances connectivity. With digitalization comes an increase in cyber threats. Hospitals are among the primary targets of cybercriminals. Adequate protective measures require knowledge and analysis of frequently occurring incidents. This study aimed to identify types of cyber risks and to evaluate factors influencing incident occurrence using a mixed-methods approach. Data on cyber incidents and data breaches from 2021 to 2024 were consolidated from five publicly accessible international datasets into a single unified dataset with 3459 entries and analyzed with a focus on hospital incidents. Results showed that hacking, especially involving ransomware, poses a key security risk in hospitals. The results were then discussed in four focus groups with 14 IT experts from hospitals. They highlighted threats and potential conflicts arising from the integration of new technologies, including the escalation of external risks as hacking activities become more organized and professionalized. The need for openly accessible and understandable data on hospital cyber risks, as well as for collaborative exchange among institutions, was emphasized. The study identifies gaps in current knowledge regarding the integration of technology into hospital networks, suggesting directions for future research.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 54: Cybersecurity Challenges in Hospitals: International Incident Reports Analysis and Expert Validation</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/54">doi: 10.3390/informatics13040054</a></p>
	<p>Authors:
		Grigori Rogge
		Sabine Bohnet-Joschko
		</p>
	<p>The healthcare sector is undergoing a digital transformation that improves the quality of care, increases efficiency, and enhances connectivity. With digitalization comes an increase in cyber threats. Hospitals are among the primary targets of cybercriminals. Adequate protective measures require knowledge and analysis of frequently occurring incidents. This study aimed to identify types of cyber risks and to evaluate factors influencing incident occurrence using a mixed-methods approach. Data on cyber incidents and data breaches from 2021 to 2024 were consolidated from five publicly accessible international datasets into a single unified dataset with 3459 entries and analyzed with a focus on hospital incidents. Results showed that hacking, especially involving ransomware, poses a key security risk in hospitals. The results were then discussed in four focus groups with 14 IT experts from hospitals. They highlighted threats and potential conflicts arising from the integration of new technologies, including the escalation of external risks as hacking activities become more organized and professionalized. The need for openly accessible and understandable data on hospital cyber risks, as well as for collaborative exchange among institutions, was emphasized. The study identifies gaps in current knowledge regarding the integration of technology into hospital networks, suggesting directions for future research.</p>
	]]></content:encoded>

	<dc:title>Cybersecurity Challenges in Hospitals: International Incident Reports Analysis and Expert Validation</dc:title>
			<dc:creator>Grigori Rogge</dc:creator>
			<dc:creator>Sabine Bohnet-Joschko</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040054</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/informatics13040054</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/53">

	<title>Informatics, Vol. 13, Pages 53: Quality Assessment of Generative AI in Cybersecurity Certification</title>
	<link>https://www.mdpi.com/2227-9709/13/4/53</link>
	<description>Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), is rapidly changing how higher education approaches teaching, learning, and assessment. In cybersecurity education, professional certification exams are key for measuring competence and helping professionals find better job offers, but there is little research on how GenAI systems perform in these exam settings. This study looks at how three popular LLMs, ChatGPT-5, Gemini-2.5 Pro, and Copilot-2.5 Pro, handle 183 practice questions from the CompTIA Security+ certification. The study used a two-phase evaluation: a domain-based assessment and a full-length practice exam that mirrors real certification tests. The researchers measured model performance with accuracy scores, chi-square tests for statistical differences, and an error taxonomy to spot patterns of mistakes important for education. All three GenAI systems scored above the passing mark, and there were no significant differences between them. Still, the error analysis showed ongoing conceptual and classification mistakes that did not show up in the overall accuracy scores. Our results show that GenAI systems can pass structured certification tests, but accuracy by itself does not fully measure professional skills. The study points out important issues for the reliability and validity of AI-based assessments in higher education and stresses the need for more realistic, concept-focused ways to evaluate GenAI in cybersecurity education.</description>
	<pubDate>2026-03-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 53: Quality Assessment of Generative AI in Cybersecurity Certification</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/53">doi: 10.3390/informatics13040053</a></p>
	<p>Authors:
		Vanessa G. Félix
		Rodolfo Ostos
		Luis J. Mena
		Homero Toral-Cruz
		Alberto Ochoa-Brust
		Pablo Velarde-Alvarado
		Apolinar González-Potes
		Ramón A. Félix-Cuadras
		José A. León-Borges
		Rafael Martínez-Peláez
		</p>
	<p>Generative Artificial Intelligence (GenAI), particularly Large Language Models (LLMs), is rapidly changing how higher education approaches teaching, learning, and assessment. In cybersecurity education, professional certification exams are key for measuring competence and helping professionals find better job offers, but there is little research on how GenAI systems perform in these exam settings. This study looks at how three popular LLMs, ChatGPT-5, Gemini-2.5 Pro, and Copilot-2.5 Pro, handle 183 practice questions from the CompTIA Security+ certification. The study used a two-phase evaluation: a domain-based assessment and a full-length practice exam that mirrors real certification tests. The researchers measured model performance with accuracy scores, chi-square tests for statistical differences, and an error taxonomy to spot patterns of mistakes important for education. All three GenAI systems scored above the passing mark, and there were no significant differences between them. Still, the error analysis showed ongoing conceptual and classification mistakes that did not show up in the overall accuracy scores. Our results show that GenAI systems can pass structured certification tests, but accuracy by itself does not fully measure professional skills. The study points out important issues for the reliability and validity of AI-based assessments in higher education and stresses the need for more realistic, concept-focused ways to evaluate GenAI in cybersecurity education.</p>
	]]></content:encoded>

	<dc:title>Quality Assessment of Generative AI in Cybersecurity Certification</dc:title>
			<dc:creator>Vanessa G. Félix</dc:creator>
			<dc:creator>Rodolfo Ostos</dc:creator>
			<dc:creator>Luis J. Mena</dc:creator>
			<dc:creator>Homero Toral-Cruz</dc:creator>
			<dc:creator>Alberto Ochoa-Brust</dc:creator>
			<dc:creator>Pablo Velarde-Alvarado</dc:creator>
			<dc:creator>Apolinar González-Potes</dc:creator>
			<dc:creator>Ramón A. Félix-Cuadras</dc:creator>
			<dc:creator>José A. León-Borges</dc:creator>
			<dc:creator>Rafael Martínez-Peláez</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040053</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-30</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-30</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/informatics13040053</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/52">

	<title>Informatics, Vol. 13, Pages 52: Tax Professionals&amp;rsquo; Perceptions, Compliance Costs, and Compliance Intentions Under Indonesia&amp;rsquo;s Core Tax Administration System</title>
	<link>https://www.mdpi.com/2227-9709/13/4/52</link>
	<description>This study provides an early evaluation of the effectiveness of the Core Tax Administration System, a digital taxation platform introduced to integrate all tax administration processes in Indonesia into a single system. To conduct this evaluation, the study integrates two of the most established frameworks in the information systems literature, namely the DeLone and McLean Information Systems Success Model and the Technology Acceptance Model. Tax professionals are involved in the evaluation process because they are the primary users of the system and possess advanced knowledge of taxation. Structural equation modeling is employed as the analytical technique. The results indicate that system usage generates individual-level benefits by reducing perceived compliance costs, which in turn translate into organizational-level outcomes in the form of increased tax compliance intentions. However, the non-linear effect analysis reveals that this relationship is not entirely linear but follows an inverted U-shaped pattern. This finding suggests that over time, highly routine system usage may reduce professional vigilance by fostering excessive reliance on automated features and superficial processing. Such dependence can weaken perceived efficiency gains and diminish intrinsic motivation for careful and accurate reporting, highlighting the importance of balancing efficiency with system design features that support professional judgment and vigilance.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 52: Tax Professionals&rsquo; Perceptions, Compliance Costs, and Compliance Intentions Under Indonesia&rsquo;s Core Tax Administration System</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/52">doi: 10.3390/informatics13040052</a></p>
	<p>Authors:
		Prianto Budi Saptono
		Gustofan Mahmud
		Ismail Khozen
		Arfah Habib Saragih
		Wulandari Kartika Sari
		Adang Hendrawan
		Milla Sepliana Setyowati
		</p>
	<p>This study provides an early evaluation of the effectiveness of the Core Tax Administration System, a digital taxation platform introduced to integrate all tax administration processes in Indonesia into a single system. To conduct this evaluation, the study integrates two of the most established frameworks in the information systems literature, namely the DeLone and McLean Information Systems Success Model and the Technology Acceptance Model. Tax professionals are involved in the evaluation process because they are the primary users of the system and possess advanced knowledge of taxation. Structural equation modeling is employed as the analytical technique. The results indicate that system usage generates individual-level benefits by reducing perceived compliance costs, which in turn translate into organizational-level outcomes in the form of increased tax compliance intentions. However, the non-linear effect analysis reveals that this relationship is not entirely linear but follows an inverted U-shaped pattern. This finding suggests that over time, highly routine system usage may reduce professional vigilance by fostering excessive reliance on automated features and superficial processing. Such dependence can weaken perceived efficiency gains and diminish intrinsic motivation for careful and accurate reporting, highlighting the importance of balancing efficiency with system design features that support professional judgment and vigilance.</p>
	]]></content:encoded>

	<dc:title>Tax Professionals&amp;rsquo; Perceptions, Compliance Costs, and Compliance Intentions Under Indonesia&amp;rsquo;s Core Tax Administration System</dc:title>
			<dc:creator>Prianto Budi Saptono</dc:creator>
			<dc:creator>Gustofan Mahmud</dc:creator>
			<dc:creator>Ismail Khozen</dc:creator>
			<dc:creator>Arfah Habib Saragih</dc:creator>
			<dc:creator>Wulandari Kartika Sari</dc:creator>
			<dc:creator>Adang Hendrawan</dc:creator>
			<dc:creator>Milla Sepliana Setyowati</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040052</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/informatics13040052</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/51">

	<title>Informatics, Vol. 13, Pages 51: Ethics in Artificial Intelligence: A Cross-Sectoral Review of 2019&amp;ndash;2025</title>
	<link>https://www.mdpi.com/2227-9709/13/4/51</link>
	<description>Artificial Intelligence (AI) has transitioned from a specialized research area to a ubiquitous socio-technical infrastructure influencing sectors from healthcare and law to manufacturing and defense. In tandem with its transformative promise, AI has created an exponentially expanding ethics literature questioning fairness, transparency, accountability, and justice. This review synthesizes publications and key policy developments between 2019 and 2025, bringing sectoral discourses together with cross-cutting frameworks. Grounded in a systematic scoping review methodology, we frame the field along four meta-dimensions: trust and transparency, bias and fairness, governance &amp;amp; regulation, and justice, while we investigate their expression across diverse sectors. Special attention is dedicated to healthcare (patient trust and algorithmic bias), education (integrity and authorship), media (misinformation), law (accountability), and the industrial sector (data integrity, intellectual property protection, and environmental safety). We ground abstract principles in concrete case studies to illustrate real-world harms and mitigation strategies. Furthermore, we incorporate pluralistic ethics (e.g., Ubuntu, Islamic perspectives), environmental ethics, and emerging challenges posed by Generative AI and neuro-AI interfaces. To bridge theory and practice, we propose an operational governance framework for organizations. We contend that success involves transitioning from principles toward ethics-by-design, pluralistic governance, sustainability, and adaptive oversight. This review is intended for scholars, practitioners, and policymakers who need a comprehensive and actionable framework for navigating the complex landscape of AI ethics.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 51: Ethics in Artificial Intelligence: A Cross-Sectoral Review of 2019&ndash;2025</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/51">doi: 10.3390/informatics13040051</a></p>
	<p>Authors:
		Charalampos M. Liapis
		Nikos Fazakis
		Sotiris Kotsiantis
		Yannis Dimakopoulos
		</p>
	<p>Artificial Intelligence (AI) has transitioned from a specialized research area to a ubiquitous socio-technical infrastructure influencing sectors from healthcare and law to manufacturing and defense. In tandem with its transformative promise, AI has created an exponentially expanding ethics literature questioning fairness, transparency, accountability, and justice. This review synthesizes publications and key policy developments between 2019 and 2025, bringing sectoral discourses together with cross-cutting frameworks. Grounded in a systematic scoping review methodology, we frame the field along four meta-dimensions: trust and transparency, bias and fairness, governance &amp; regulation, and justice, while we investigate their expression across diverse sectors. Special attention is dedicated to healthcare (patient trust and algorithmic bias), education (integrity and authorship), media (misinformation), law (accountability), and the industrial sector (data integrity, intellectual property protection, and environmental safety). We ground abstract principles in concrete case studies to illustrate real-world harms and mitigation strategies. Furthermore, we incorporate pluralistic ethics (e.g., Ubuntu, Islamic perspectives), environmental ethics, and emerging challenges posed by Generative AI and neuro-AI interfaces. To bridge theory and practice, we propose an operational governance framework for organizations. We contend that success involves transitioning from principles toward ethics-by-design, pluralistic governance, sustainability, and adaptive oversight. This review is intended for scholars, practitioners, and policymakers who need a comprehensive and actionable framework for navigating the complex landscape of AI ethics.</p>
	]]></content:encoded>

	<dc:title>Ethics in Artificial Intelligence: A Cross-Sectoral Review of 2019&amp;ndash;2025</dc:title>
			<dc:creator>Charalampos M. Liapis</dc:creator>
			<dc:creator>Nikos Fazakis</dc:creator>
			<dc:creator>Sotiris Kotsiantis</dc:creator>
			<dc:creator>Yannis Dimakopoulos</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040051</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/informatics13040051</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/50">

	<title>Informatics, Vol. 13, Pages 50: Data Mining to Identify Factors Associated with University Student Retention</title>
	<link>https://www.mdpi.com/2227-9709/13/4/50</link>
	<description>Student retention has become a major challenge for higher education institutions due to the influence that academic, socioeconomic, family, and motivational factors exert on students&amp;rsquo; academic continuity. In this context, understanding the determinants that explain university persistence is essential for designing effective retention strategies. Based on the analysis of factors related to motivation, commitment, attitude, academic integration, and social and economic conditions, retention patterns were examined in a population of 532 university students, of whom 57.7% showed high retention, 38.2% medium retention, and 4.1% low retention. To identify the factors with the greatest influence on academic continuity, educational data mining techniques and supervised classification models were applied and evaluated using stratified 10-fold cross-validation. Tree-based ensemble models showed the most consistent predictive performance, with Random Forest achieving the best results (accuracy = 0.729 &amp;plusmn; 0.058; F1-macro = 0.636 &amp;plusmn; 0.136). Model interpretability was examined through SHAP analysis, which revealed that transportation conditions (0.249), task completion (0.170), absence of work obligations (0.168), and course completion (0.164) were the most influential predictors in the classification of retention levels. In addition, sensitivity analysis indicated that academic commitment accounts for 41.6% of the predictive impact, followed by motivation (23.5%). These findings demonstrate that student retention is shaped by the interaction of academic, motivational, and contextual factors and provide practical implications for the development of early warning systems, personalized tutoring programs, psychosocial support initiatives, and financial assistance policies aimed at strengthening university retention.</description>
	<pubDate>2026-03-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 50: Data Mining to Identify Factors Associated with University Student Retention</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/50">doi: 10.3390/informatics13040050</a></p>
	<p>Authors:
		Yuri Reina Marín
		Lenin Quiñones Huatangari
		Judith Nathaly Alva Tuesta
		Omer Cruz Caro
		Jorge Luis Maicelo Guevara
		Einstein Sánchez Bardales
		River Chávez Santos
		</p>
	<p>Student retention has become a major challenge for higher education institutions due to the influence that academic, socioeconomic, family, and motivational factors exert on students&rsquo; academic continuity. In this context, understanding the determinants that explain university persistence is essential for designing effective retention strategies. Based on the analysis of factors related to motivation, commitment, attitude, academic integration, and social and economic conditions, retention patterns were examined in a population of 532 university students, of whom 57.7% showed high retention, 38.2% medium retention, and 4.1% low retention. To identify the factors with the greatest influence on academic continuity, educational data mining techniques and supervised classification models were applied and evaluated using stratified 10-fold cross-validation. Tree-based ensemble models showed the most consistent predictive performance, with Random Forest achieving the best results (accuracy = 0.729 &plusmn; 0.058; F1-macro = 0.636 &plusmn; 0.136). Model interpretability was examined through SHAP analysis, which revealed that transportation conditions (0.249), task completion (0.170), absence of work obligations (0.168), and course completion (0.164) were the most influential predictors in the classification of retention levels. In addition, sensitivity analysis indicated that academic commitment accounts for 41.6% of the predictive impact, followed by motivation (23.5%). These findings demonstrate that student retention is shaped by the interaction of academic, motivational, and contextual factors and provide practical implications for the development of early warning systems, personalized tutoring programs, psychosocial support initiatives, and financial assistance policies aimed at strengthening university retention.</p>
	]]></content:encoded>

	<dc:title>Data Mining to Identify Factors Associated with University Student Retention</dc:title>
			<dc:creator>Yuri Reina Marín</dc:creator>
			<dc:creator>Lenin Quiñones Huatangari</dc:creator>
			<dc:creator>Judith Nathaly Alva Tuesta</dc:creator>
			<dc:creator>Omer Cruz Caro</dc:creator>
			<dc:creator>Jorge Luis Maicelo Guevara</dc:creator>
			<dc:creator>Einstein Sánchez Bardales</dc:creator>
			<dc:creator>River Chávez Santos</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040050</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-27</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/informatics13040050</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/49">

	<title>Informatics, Vol. 13, Pages 49: A Discrete-Form Double-Integration-Enhanced Recurrent Neural Network for Stewart Platform Control with Time-Varying Disturbance Suppression</title>
	<link>https://www.mdpi.com/2227-9709/13/4/49</link>
	<description>The discrete-form control of the Stewart platform is essential for digital implementation in intelligent manufacturing and robotic systems under the context of Industry 4.0, yet its performance is often degraded by unavoidable discrete disturbances. This challenge motivates the development of algorithms with strong disturbance suppression capability. To address this issue, a continuous-form double-integration-enhanced recurrent neural network (CF-DIE-RNN) algorithm incorporating a novel double-integration-enhanced design concept is first developed to improve robustness against time-varying disturbances. For digital hardware applications, a discrete-form double-integration-enhanced RNN (DF-DIE-RNN) algorithm is then constructed by discretizing the CF-DIE-RNN algorithm using a general four-step discretization formula and a one-step forward difference formula based on Taylor expansion. Rigorous theoretical analysis establishes the convergence properties of the proposed algorithm and characterizes its steady-state residual bounds under different disturbance types, revealing its capability to suppress discrete quadratic time-varying disturbances. Numerical and simulation experiments demonstrate that the DF-DIE-RNN algorithm achieves superior disturbance suppression and more accurate trajectory tracking than existing discrete-form RNN algorithms, confirming its effectiveness for discrete-form Stewart platform control.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 49: A Discrete-Form Double-Integration-Enhanced Recurrent Neural Network for Stewart Platform Control with Time-Varying Disturbance Suppression</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/49">doi: 10.3390/informatics13040049</a></p>
	<p>Authors:
		Yueyang Ma
		Yang Shi
		Chao Jiang
		</p>
	<p>The discrete-form control of the Stewart platform is essential for digital implementation in intelligent manufacturing and robotic systems under the context of Industry 4.0, yet its performance is often degraded by unavoidable discrete disturbances. This challenge motivates the development of algorithms with strong disturbance suppression capability. To address this issue, a continuous-form double-integration-enhanced recurrent neural network (CF-DIE-RNN) algorithm incorporating a novel double-integration-enhanced design concept is first developed to improve robustness against time-varying disturbances. For digital hardware applications, a discrete-form double-integration-enhanced RNN (DF-DIE-RNN) algorithm is then constructed by discretizing the CF-DIE-RNN algorithm using a general four-step discretization formula and a one-step forward difference formula based on Taylor expansion. Rigorous theoretical analysis establishes the convergence properties of the proposed algorithm and characterizes its steady-state residual bounds under different disturbance types, revealing its capability to suppress discrete quadratic time-varying disturbances. Numerical and simulation experiments demonstrate that the DF-DIE-RNN algorithm achieves superior disturbance suppression and more accurate trajectory tracking than existing discrete-form RNN algorithms, confirming its effectiveness for discrete-form Stewart platform control.</p>
	]]></content:encoded>

	<dc:title>A Discrete-Form Double-Integration-Enhanced Recurrent Neural Network for Stewart Platform Control with Time-Varying Disturbance Suppression</dc:title>
			<dc:creator>Yueyang Ma</dc:creator>
			<dc:creator>Yang Shi</dc:creator>
			<dc:creator>Chao Jiang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040049</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/informatics13040049</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/48">

	<title>Informatics, Vol. 13, Pages 48: Generative AI-Assisted Automation of Clinical Data Processing: A Methodological Framework for Streamlining Behavioral Research Workflows</title>
	<link>https://www.mdpi.com/2227-9709/13/4/48</link>
	<description>This article presents a methodological framework for automating clinical data processing workflows using Generative Artificial Intelligence (AI) as an interactive co-developer. We demonstrate how Large Language Models (LLMs), specifically ChatGPT and Claude, can assist researchers in designing, implementing, and deploying complete ETL (Extract, Transform, Load) pipelines without requiring advanced programming or DevOps expertise. Using a dataset of 102 participants from a nonverbal expression study as a proof-of-concept, we show how AI-assisted automation transforms FaceReader video analysis outputs during the Cyberball paradigm into structured, analysis-ready datasets through containerized workflows orchestrated via Docker and n8n. The resulting framework successfully processes all 102 datasets, generating machine learning outputs to validate pipeline execution stability (rather than clinical predictivity), and deploys interactive visualization dashboards, tasks that would normally require significant manual effort and technical specialization expertise. This work establishes a replicable methodology for integrating Generative AI into research data management workflows, with implications for accelerating scientific discovery across behavioral and medical research domains.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 48: Generative AI-Assisted Automation of Clinical Data Processing: A Methodological Framework for Streamlining Behavioral Research Workflows</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/48">doi: 10.3390/informatics13040048</a></p>
	<p>Authors:
		Marta Lilia Eraña-Díaz
		Alejandra Rosales-Lagarde
		Iván Arango-de-Montis
		José Alejandro Velázquez-Monzón
		</p>
	<p>This article presents a methodological framework for automating clinical data processing workflows using Generative Artificial Intelligence (AI) as an interactive co-developer. We demonstrate how Large Language Models (LLMs), specifically ChatGPT and Claude, can assist researchers in designing, implementing, and deploying complete ETL (Extract, Transform, Load) pipelines without requiring advanced programming or DevOps expertise. Using a dataset of 102 participants from a nonverbal expression study as a proof-of-concept, we show how AI-assisted automation transforms FaceReader video analysis outputs during the Cyberball paradigm into structured, analysis-ready datasets through containerized workflows orchestrated via Docker and n8n. The resulting framework successfully processes all 102 datasets, generating machine learning outputs to validate pipeline execution stability (rather than clinical predictivity), and deploys interactive visualization dashboards, tasks that would normally require significant manual effort and technical specialization expertise. This work establishes a replicable methodology for integrating Generative AI into research data management workflows, with implications for accelerating scientific discovery across behavioral and medical research domains.</p>
	]]></content:encoded>

	<dc:title>Generative AI-Assisted Automation of Clinical Data Processing: A Methodological Framework for Streamlining Behavioral Research Workflows</dc:title>
			<dc:creator>Marta Lilia Eraña-Díaz</dc:creator>
			<dc:creator>Alejandra Rosales-Lagarde</dc:creator>
			<dc:creator>Iván Arango-de-Montis</dc:creator>
			<dc:creator>José Alejandro Velázquez-Monzón</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040048</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/informatics13040048</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/47">

	<title>Informatics, Vol. 13, Pages 47: Concurrent Prediction of Length of Stay, Mortality, and Total Charges in Patients with Acute Lymphoblastic Leukemia Using Continuous Machine Learning</title>
	<link>https://www.mdpi.com/2227-9709/13/4/47</link>
	<description>Acute lymphoblastic leukemia (ALL) presents significant clinical challenges due to its genetic complexity and high relapse rates. While outcomes like length of stay (LOS), mortality, and total charges (TCs) are critical quality indicators, most existing models rely on static data and separate outcome modeling. This study utilized the HCUP National Inpatient Sample (NIS) to develop a dynamic, concurrent prediction model for prolonged LOS and mortality (PLOSM), alongside a framework for TCs. By integrating temporally updated patient information, the concurrent approach outperformed single-outcome models. Within the first seven days of hospitalization, the model achieved accuracy and precision above 90%, with recall and F1-scores exceeding 80%. Key predictors of these outcomes included age, race, insurance type, financial indicators, and elective surgery status. Notably, both prolonged LOS and mortality were significant drivers of TCs. By bridging predictive modeling and real-time clinical data, this framework enables data-driven decision-making to optimize patient management, enhance safety, and mitigate the financial burden of ALL care.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 47: Concurrent Prediction of Length of Stay, Mortality, and Total Charges in Patients with Acute Lymphoblastic Leukemia Using Continuous Machine Learning</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/47">doi: 10.3390/informatics13040047</a></p>
	<p>Authors:
		Jiahui Ma
		Elizabeth Johnson
		Bradley M. Whitaker
		Faraz Dadgostari
		Hansjorg Schwertz
		Bernadette McCrory
		</p>
	<p>Acute lymphoblastic leukemia (ALL) presents significant clinical challenges due to its genetic complexity and high relapse rates. While outcomes like length of stay (LOS), mortality, and total charges (TCs) are critical quality indicators, most existing models rely on static data and separate outcome modeling. This study utilized the HCUP National Inpatient Sample (NIS) to develop a dynamic, concurrent prediction model for prolonged LOS and mortality (PLOSM), alongside a framework for TCs. By integrating temporally updated patient information, the concurrent approach outperformed single-outcome models. Within the first seven days of hospitalization, the model achieved accuracy and precision above 90%, with recall and F1-scores exceeding 80%. Key predictors of these outcomes included age, race, insurance type, financial indicators, and elective surgery status. Notably, both prolonged LOS and mortality were significant drivers of TCs. By bridging predictive modeling and real-time clinical data, this framework enables data-driven decision-making to optimize patient management, enhance safety, and mitigate the financial burden of ALL care.</p>
	]]></content:encoded>

	<dc:title>Concurrent Prediction of Length of Stay, Mortality, and Total Charges in Patients with Acute Lymphoblastic Leukemia Using Continuous Machine Learning</dc:title>
			<dc:creator>Jiahui Ma</dc:creator>
			<dc:creator>Elizabeth Johnson</dc:creator>
			<dc:creator>Bradley M. Whitaker</dc:creator>
			<dc:creator>Faraz Dadgostari</dc:creator>
			<dc:creator>Hansjorg Schwertz</dc:creator>
			<dc:creator>Bernadette McCrory</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040047</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/informatics13040047</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/4/46">

	<title>Informatics, Vol. 13, Pages 46: Reimagining Traditional Workspaces Through Digitalisation and Hybrid Perspective: A Systematic Review</title>
	<link>https://www.mdpi.com/2227-9709/13/4/46</link>
	<description>Workspace digitalisation presents a transformative shift from traditional, physically bounded offices to virtual, technology-enabled environments. Digital technologies like cloud computing, artificial intelligence, and the Internet of Things enable remote collaboration, data accessibility, and operational efficiency, thereby accelerating this transformation. Digital workspaces transcend geographical limitations, enabling a more flexible, inclusive, and adaptive work culture. They offer better work–life balance, with flexible options, reduced commuting time, and increased personal autonomy and control over commitments, compared to traditional workspaces. Despite these benefits, digitalisation creates cybersecurity, data privacy, and digital divide issues, where unequal access to digital tools and skills can exacerbate social and economic inequalities. The lack of physical interaction affects team cohesion and company culture. Hence, this paper explores these phenomena to uncover their implications and consider possible strategies to optimise workspace digitalisation, providing a comprehensive systematic review of extant literature within the study context, offering pragmatic insights and recommendations for workspaces. This study has found workspace digitalisation to be a complex, multifaceted phenomenon that provides flexibility, efficiency, and innovation, but also poses challenges that must be carefully managed. It postulates that as technology and work progress, a hybrid model that blends digital and traditional workspaces would be suited to each organisation’s needs and goals.</description>
	<pubDate>2026-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 46: Reimagining Traditional Workspaces Through Digitalisation and Hybrid Perspective: A Systematic Review</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/4/46">doi: 10.3390/informatics13040046</a></p>
	<p>Authors:
		Ayogeboh Epizitone
		Smangele Pretty Moyane
		</p>
	<p>Workspace digitalisation presents a transformative shift from traditional, physically bounded offices to virtual, technology-enabled environments. Digital technologies like cloud computing, artificial intelligence, and the Internet of Things enable remote collaboration, data accessibility, and operational efficiency, thereby accelerating this transformation. Digital workspaces transcend geographical limitations, enabling a more flexible, inclusive, and adaptive work culture. They offer better work–life balance, with flexible options, reduced commuting time, and increased personal autonomy and control over commitments, compared to traditional workspaces. Despite these benefits, digitalisation creates cybersecurity, data privacy, and digital divide issues, where unequal access to digital tools and skills can exacerbate social and economic inequalities. The lack of physical interaction affects team cohesion and company culture. Hence, this paper explores these phenomena to uncover their implications and consider possible strategies to optimise workspace digitalisation, providing a comprehensive systematic review of extant literature within the study context, offering pragmatic insights and recommendations for workspaces. This study has found workspace digitalisation to be a complex, multifaceted phenomenon that provides flexibility, efficiency, and innovation, but also poses challenges that must be carefully managed. It postulates that as technology and work progress, a hybrid model that blends digital and traditional workspaces would be suited to each organisation’s needs and goals.</p>
	]]></content:encoded>

	<dc:title>Reimagining Traditional Workspaces Through Digitalisation and Hybrid Perspective: A Systematic Review</dc:title>
			<dc:creator>Ayogeboh Epizitone</dc:creator>
			<dc:creator>Smangele Pretty Moyane</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13040046</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-24</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/informatics13040046</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/4/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/45">

	<title>Informatics, Vol. 13, Pages 45: Data Foundations for Medical AI: Provenance, Reliability and Limitations of Russian Clinical NLP Resources</title>
	<link>https://www.mdpi.com/2227-9709/13/3/45</link>
	<description>Russian-language resources for medical natural language processing (NLP) are expanding rapidly; however, their fragmentation, uneven curation, and limited clinical reliability hinder the development of safe machine learning systems for prognosis, prevention, and precision medicine. We provide the first systematic survey of Russian medical NLP datasets and analyze their suitability for clinically meaningful tasks as defined by the MedHELM taxonomy. We additionally perform expert clinical validation of three representative public corpora—RuMedPrimeData (real outpatient notes), MedSyn (synthetic clinical notes), and RuMedNLI (translated natural language inference)—assessing clinical plausibility, diagnosis accuracy, and logical consistency. Experts identified substantial reliability issues: across randomly sampled subsets of each corpus, only approximately 20% of RuMedPrimeData records, fewer than 15% of MedSyn records, and approximately 55% of RuMedNLI pairs met essential quality criteria, which can hinder downstream ML systems built on these data. To support robust applications—ranging from medical chatbots and triage assistants to predictive and preventive models—we outline practical requirements for high-quality datasets: coordinated, expert-validated, machine-readable corpora aligned with clinical guidelines and insurance logic, standardized de-identification, and transparent provenance. Strengthening these data foundations will enable the development of reliable, reproducible, and clinically relevant AI systems suitable for real-world healthcare applications.</description>
	<pubDate>2026-03-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 45: Data Foundations for Medical AI: Provenance, Reliability and Limitations of Russian Clinical NLP Resources</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/45">doi: 10.3390/informatics13030045</a></p>
	<p>Authors:
		Arsenii Litvinov
		Lev Malishevskii
		Evgeny Karpulevich
		Iaroslav Bespalov
		Yaroslav Nedumov
		Sergey Zhdanov
		Ivan Oseledets
		Evgeniy Shlyakhto
		Arutyun Avetisyan
		</p>
	<p>Russian-language resources for medical natural language processing (NLP) are expanding rapidly; however, their fragmentation, uneven curation, and limited clinical reliability hinder the development of safe machine learning systems for prognosis, prevention, and precision medicine. We provide the first systematic survey of Russian medical NLP datasets and analyze their suitability for clinically meaningful tasks as defined by the MedHELM taxonomy. We additionally perform expert clinical validation of three representative public corpora—RuMedPrimeData (real outpatient notes), MedSyn (synthetic clinical notes), and RuMedNLI (translated natural language inference)—assessing clinical plausibility, diagnosis accuracy, and logical consistency. Experts identified substantial reliability issues: across randomly sampled subsets of each corpus, only approximately 20% of RuMedPrimeData records, fewer than 15% of MedSyn records, and approximately 55% of RuMedNLI pairs met essential quality criteria, which can hinder downstream ML systems built on these data. To support robust applications—ranging from medical chatbots and triage assistants to predictive and preventive models—we outline practical requirements for high-quality datasets: coordinated, expert-validated, machine-readable corpora aligned with clinical guidelines and insurance logic, standardized de-identification, and transparent provenance. Strengthening these data foundations will enable the development of reliable, reproducible, and clinically relevant AI systems suitable for real-world healthcare applications.</p>
	]]></content:encoded>

	<dc:title>Data Foundations for Medical AI: Provenance, Reliability and Limitations of Russian Clinical NLP Resources</dc:title>
			<dc:creator>Arsenii Litvinov</dc:creator>
			<dc:creator>Lev Malishevskii</dc:creator>
			<dc:creator>Evgeny Karpulevich</dc:creator>
			<dc:creator>Iaroslav Bespalov</dc:creator>
			<dc:creator>Yaroslav Nedumov</dc:creator>
			<dc:creator>Sergey Zhdanov</dc:creator>
			<dc:creator>Ivan Oseledets</dc:creator>
			<dc:creator>Evgeniy Shlyakhto</dc:creator>
			<dc:creator>Arutyun Avetisyan</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030045</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-20</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/informatics13030045</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/44">

	<title>Informatics, Vol. 13, Pages 44: Machine Learning and Generative AI in Administrative Processes in Peru: Administrative Efficiency in the National Public Sector</title>
	<link>https://www.mdpi.com/2227-9709/13/3/44</link>
	<description>Public organizations in Peru have committed substantial resources to artificial intelligence over recent years, yet evidence on whether these investments produce measurable returns has remained scarce. This study evaluated the causal impact of AI adoption on administrative efficiency across 20 Peruvian national public organizations, using a quasi-experimental design combining Difference-in-Differences with Propensity Score Matching, complemented by XGBoost version 1.7.6, Random Forest, GPT-4, and SHAP explainability analysis. The sample comprised 428 civil servants across treatment and control organizations. Results showed significant efficiency gains as perceived by civil servants through validated Likert instruments: work absenteeism decreased by 9.4%, processing times by 8.7%, and administrative costs by 18.2%, all at p &lt; 0.001 with Cohen’s d ranging from 0.55 to 0.90. The convergence between DiD and PSM estimates supports a causal reading of these effects. Four of five hypotheses were supported. AI delivered comparable efficiency gains regardless of institutional complexity, so H2 was not confirmed. Digital infrastructure significantly moderated AI effectiveness (H3: r = 0.198, p = 0.004). Higher resistance to change was significantly associated with lower efficiency outcomes (H5: r = −0.256, p &lt; 0.001), reinforcing the role of proactive change management as a positive moderator of AI effectiveness. SHAP analysis revealed that training investment, specialized IT personnel, and resistance management together explained 51% of predictive importance, outweighing structural variables such as budget size or geographic location. These findings provide the first systematic causal evidence on AI efficiency in Peruvian public administration and offer actionable benchmarks for comparable middle-income public sectors.</description>
	<pubDate>2026-03-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 44: Machine Learning and Generative AI in Administrative Processes in Peru: Administrative Efficiency in the National Public Sector</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/44">doi: 10.3390/informatics13030044</a></p>
	<p>Authors:
		Miluska Odely Rodriguez Saavedra
		Juliana Mery Bautista Lopez
		Wilian Quispe Nina
		Antonio Víctor Morales Gonzales
		Iván Cuentas Galindo
		Luis Miguel Campos Ascuña
		Anthony Stefano Saenz Colana
		Robinson Bernardino Almanza Cabe
		Paola Gabriela Lujan Tito
		Sharon Veronika Liendo Teran
		</p>
	<p>Public organizations in Peru have committed substantial resources to artificial intelligence over recent years, yet evidence on whether these investments produce measurable returns has remained scarce. This study evaluated the causal impact of AI adoption on administrative efficiency across 20 Peruvian national public organizations, using a quasi-experimental design combining Difference-in-Differences with Propensity Score Matching, complemented by XGBoost version 1.7.6, Random Forest, GPT-4, and SHAP explainability analysis. The sample comprised 428 civil servants across treatment and control organizations. Results showed significant efficiency gains as perceived by civil servants through validated Likert instruments: work absenteeism decreased by 9.4%, processing times by 8.7%, and administrative costs by 18.2%, all at p &lt; 0.001 with Cohen’s d ranging from 0.55 to 0.90. The convergence between DiD and PSM estimates supports a causal reading of these effects. Four of five hypotheses were supported. AI delivered comparable efficiency gains regardless of institutional complexity, so H2 was not confirmed. Digital infrastructure significantly moderated AI effectiveness (H3: r = 0.198, p = 0.004). Higher resistance to change was significantly associated with lower efficiency outcomes (H5: r = −0.256, p &lt; 0.001), reinforcing the role of proactive change management as a positive moderator of AI effectiveness. SHAP analysis revealed that training investment, specialized IT personnel, and resistance management together explained 51% of predictive importance, outweighing structural variables such as budget size or geographic location. These findings provide the first systematic causal evidence on AI efficiency in Peruvian public administration and offer actionable benchmarks for comparable middle-income public sectors.</p>
	]]></content:encoded>

	<dc:title>Machine Learning and Generative AI in Administrative Processes in Peru: Administrative Efficiency in the National Public Sector</dc:title>
			<dc:creator>Miluska Odely Rodriguez Saavedra</dc:creator>
			<dc:creator>Juliana Mery Bautista Lopez</dc:creator>
			<dc:creator>Wilian Quispe Nina</dc:creator>
			<dc:creator>Antonio Víctor Morales Gonzales</dc:creator>
			<dc:creator>Iván Cuentas Galindo</dc:creator>
			<dc:creator>Luis Miguel Campos Ascuña</dc:creator>
			<dc:creator>Anthony Stefano Saenz Colana</dc:creator>
			<dc:creator>Robinson Bernardino Almanza Cabe</dc:creator>
			<dc:creator>Paola Gabriela Lujan Tito</dc:creator>
			<dc:creator>Sharon Veronika Liendo Teran</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030044</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-19</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/informatics13030044</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/43">

	<title>Informatics, Vol. 13, Pages 43: Artificial Intelligence in Literature Review Synthesis: A Step-by-Step Methodological Approach for Researchers and Academics</title>
	<link>https://www.mdpi.com/2227-9709/13/3/43</link>
	<description>The integration of artificial intelligence (AI) in literature reviews aims to transform research by potentially automating processes, enhancing rigour, and improving quality. The study proposes a structured step-by-step approach to integrate AI tools into the literature review synthesis process. The developed methodological approach has five steps. The first step, planning and readiness, involves scoping, understanding practices, and defining boundaries of AI use. Next is selecting AI tools and aligning their capabilities with the literature needs through a matrix. The third step focuses on using AI to conduct the review, followed by validation and cross-referencing of AI-generated results. The final step is disclosing AI use in line with ethical and reporting standards. The approach is demonstrated through five scenarios: emerging or fragmented literature, large or saturated fields, interdisciplinary domains, methodologically diverse studies, and under-researched topics. This approach is designed to enhance transparency, potentially reduce bias, and support reproducibility by aligning AI functions with research goals. It also addresses ethical considerations and promotes human–AI collaboration. For researchers and academics, it aims to provide a practical roadmap for the responsible adoption of AI in literature reviews, supporting efficiency, ethical tool use, transparency, and the balance between machine assistance and academic judgment.</description>
	<pubDate>2026-03-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 43: Artificial Intelligence in Literature Review Synthesis: A Step-by-Step Methodological Approach for Researchers and Academics</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/43">doi: 10.3390/informatics13030043</a></p>
	<p>Authors:
		Matolwandile M. Mtotywa
		Jeri-Lee J. Mowers
		Wavhudi Ndou
		Thabang V. Q. Moleko
		Matsobane J. Ledwaba
		</p>
	<p>The integration of artificial intelligence (AI) in literature reviews aims to transform research by potentially automating processes, enhancing rigour, and improving quality. The study proposes a structured step-by-step approach to integrate AI tools into the literature review synthesis process. The developed methodological approach has five steps. The first step, planning and readiness, involves scoping, understanding practices, and defining boundaries of AI use. Next is selecting AI tools and aligning their capabilities with the literature needs through a matrix. The third step focuses on using AI to conduct the review, followed by validation and cross-referencing of AI-generated results. The final step is disclosing AI use in line with ethical and reporting standards. The approach is demonstrated through five scenarios: emerging or fragmented literature, large or saturated fields, interdisciplinary domains, methodologically diverse studies, and under-researched topics. This approach is designed to enhance transparency, potentially reduce bias, and support reproducibility by aligning AI functions with research goals. It also addresses ethical considerations and promotes human–AI collaboration. For researchers and academics, it aims to provide a practical roadmap for the responsible adoption of AI in literature reviews, supporting efficiency, ethical tool use, transparency, and the balance between machine assistance and academic judgment.</p>
	]]></content:encoded>

	<dc:title>Artificial Intelligence in Literature Review Synthesis: A Step-by-Step Methodological Approach for Researchers and Academics</dc:title>
			<dc:creator>Matolwandile M. Mtotywa</dc:creator>
			<dc:creator>Jeri-Lee J. Mowers</dc:creator>
			<dc:creator>Wavhudi Ndou</dc:creator>
			<dc:creator>Thabang V. Q. Moleko</dc:creator>
			<dc:creator>Matsobane J. Ledwaba</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030043</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-13</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-13</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/informatics13030043</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/42">

	<title>Informatics, Vol. 13, Pages 42: Voice, Text, or Embodied AI Avatar? Effects of Generative AI Interface Modalities in VR Museums</title>
	<link>https://www.mdpi.com/2227-9709/13/3/42</link>
	<description>Virtual museums delivered through immersive virtual reality (VR) function as information environments where users access interpretive content while navigating spatially. With the integration of generative artificial intelligence (AI), conversational assistants can dynamically mediate information interaction; however, evidence remains limited regarding how different AI interface representations affect user experience. This study compares three generative AI interface modalities in a VR virtual museum: voice only, voice with synchronized text, and voice with an embodied AI avatar. A controlled experiment with 75 participants examined their effects on user engagement, perceived information quality, and subjective cognitive workload while holding informational content constant. The results indicate that the voice-and-text modality produced the highest perceived information quality, whereas the embodied AI avatar modality yielded the highest user engagement. No significant differences were observed in cognitive workload across modalities. These findings suggest that AI interface modalities play complementary roles in VR-based information interaction and provide design guidance for selecting appropriate AI representations in immersive information systems.</description>
	<pubDate>2026-03-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 42: Voice, Text, or Embodied AI Avatar? Effects of Generative AI Interface Modalities in VR Museums</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/42">doi: 10.3390/informatics13030042</a></p>
	<p>Authors:
		Pakinee Ariya
		Perasuk Worragin
		Songpon Khanchai
		Darin Poollapalin
		Phichete Julrode
		</p>
	<p>Virtual museums delivered through immersive virtual reality (VR) function as information environments where users access interpretive content while navigating spatially. With the integration of generative artificial intelligence (AI), conversational assistants can dynamically mediate information interaction; however, evidence remains limited regarding how different AI interface representations affect user experience. This study compares three generative AI interface modalities in a VR virtual museum: voice only, voice with synchronized text, and voice with an embodied AI avatar. A controlled experiment with 75 participants examined their effects on user engagement, perceived information quality, and subjective cognitive workload while holding informational content constant. The results indicate that the voice-and-text modality produced the highest perceived information quality, whereas the embodied AI avatar modality yielded the highest user engagement. No significant differences were observed in cognitive workload across modalities. These findings suggest that AI interface modalities play complementary roles in VR-based information interaction and provide design guidance for selecting appropriate AI representations in immersive information systems.</p>
	]]></content:encoded>

	<dc:title>Voice, Text, or Embodied AI Avatar? Effects of Generative AI Interface Modalities in VR Museums</dc:title>
			<dc:creator>Pakinee Ariya</dc:creator>
			<dc:creator>Perasuk Worragin</dc:creator>
			<dc:creator>Songpon Khanchai</dc:creator>
			<dc:creator>Darin Poollapalin</dc:creator>
			<dc:creator>Phichete Julrode</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030042</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-11</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-11</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/informatics13030042</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/41">

	<title>Informatics, Vol. 13, Pages 41: Dr. Google vs. Dr. ChatGPT in Online Health Self-Consultation: A Scoping Review of Accuracy, Bias, and Actionability (2023&amp;ndash;2025)</title>
	<link>https://www.mdpi.com/2227-9709/13/3/41</link>
	<description>The rapid adoption of generative artificial intelligence (AI) systems has transformed health information seeking, raising questions about their role as intermediaries in non-professional health self-consultation. This study compares Google Search and ChatGPT as paradigmatic models of algorithmic mediation of health information, focusing on accuracy, biases, information quality and potential harms. A scoping review was conducted following the PRISMA-ScR framework. Empirical studies published between 2023 and 2025 were retrieved from PubMed/MEDLINE, Web of Science (WoS) and Scopus. After screening and eligibility assessment, 63 original empirical studies were included. The results indicate that ChatGPT consistently outperforms Google Search in terms of factual accuracy and information quality, achieving moderate to high DISCERN scores (4&amp;ndash;5 out of 5) and showing moderate to strong correlations with expert clinical evaluations. Users also tend to value ChatGPT responses positively due to their clarity, coherence and perceived empathy. However, these advantages coexist with significant structural limitations. Hallucinations are reported in an estimated 31&amp;ndash;45% of references, source provenance remains opaque, linguistic complexity is high, and actionability is limited, with only around 40% of responses providing clearly actionable guidance. In contrast, Google Search offers greater source traceability and verifiability, but at the cost of fragmented information and higher exposure to commercial content. The review identifies critical research gaps related to behavioural impacts, critical health literacy, equity of access, professional integration and vulnerable contexts. Overall, the findings highlight the need for hybrid human&amp;ndash;AI models, professional mediation and critical AI literacy to ensure safe, equitable and trustworthy use of generative AI in public health communication.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 41: Dr. Google vs. Dr. ChatGPT in Online Health Self-Consultation: A Scoping Review of Accuracy, Bias, and Actionability (2023&amp;ndash;2025)</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/41">doi: 10.3390/informatics13030041</a></p>
	<p>Authors:
		Magdalena Trillo-Domínguez
		Juan Ignacio Martin-Neira
		María Dolores Olvera-Lobo
		</p>
	<p>The rapid adoption of generative artificial intelligence (AI) systems has transformed health information seeking, raising questions about their role as intermediaries in non-professional health self-consultation. This study compares Google Search and ChatGPT as paradigmatic models of algorithmic mediation of health information, focusing on accuracy, biases, information quality and potential harms. A scoping review was conducted following the PRISMA-ScR framework. Empirical studies published between 2023 and 2025 were retrieved from PubMed/MEDLINE, Web of Science (WoS) and Scopus. After screening and eligibility assessment, 63 original empirical studies were included. The results indicate that ChatGPT consistently outperforms Google Search in terms of factual accuracy and information quality, achieving moderate to high DISCERN scores (4&amp;ndash;5 out of 5) and showing moderate to strong correlations with expert clinical evaluations. Users also tend to value ChatGPT responses positively due to their clarity, coherence and perceived empathy. However, these advantages coexist with significant structural limitations. Hallucinations are reported in an estimated 31&amp;ndash;45% of references, source provenance remains opaque, linguistic complexity is high, and actionability is limited, with only around 40% of responses providing clearly actionable guidance. In contrast, Google Search offers greater source traceability and verifiability, but at the cost of fragmented information and higher exposure to commercial content. The review identifies critical research gaps related to behavioural impacts, critical health literacy, equity of access, professional integration and vulnerable contexts. Overall, the findings highlight the need for hybrid human&amp;ndash;AI models, professional mediation and critical AI literacy to ensure safe, equitable and trustworthy use of generative AI in public health communication.</p>
	]]></content:encoded>

	<dc:title>Dr. Google vs. Dr. ChatGPT in Online Health Self-Consultation: A Scoping Review of Accuracy, Bias, and Actionability (2023&amp;ndash;2025)</dc:title>
			<dc:creator>Magdalena Trillo-Domínguez</dc:creator>
			<dc:creator>Juan Ignacio Martin-Neira</dc:creator>
			<dc:creator>María Dolores Olvera-Lobo</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030041</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/informatics13030041</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/40">

	<title>Informatics, Vol. 13, Pages 40: Organizational Characteristics Associated with Health Information Systems Adoption in Local Health Departments During the COVID-19 Pandemic</title>
	<link>https://www.mdpi.com/2227-9709/13/3/40</link>
	<description>Background: The COVID-19 pandemic revealed persistent gaps in local health department (LHD) health informatics capacity. This study examines organizational characteristics of LHDs associated with the adoption of six health information systems: electronic case reporting (eCR), electronic disease reporting systems (EDRS), electronic health records (EHR), electronic lab reporting (ELR), health information exchange (HIE), and immunization registries (IR). Methods: We used a mixed-methods design, including multinomial or binary logistic regression analyses of quantitative data from the 2022 NACCHO National Profile of Local Health Departments (n = 441) and thematic analysis of semi-structured interviews with five LHD staff members. Results: About half (49.9%) of LHDs had implemented eCR, while higher proportions had implemented EDRS (78.0%), EHR (62.4%), ELR (57.2%), HIE (92.6%), and IR (92.6%). Workforce size was associated with the implementation of eCR, EHR, and IR. The number of vacant staff positions was associated with a lower odds of IR implementation; compared with medium-sized LHDs, both small and large LHDs had higher odds of IR implementation. Shared-governance LHDs had higher odds of adopting ELR and HIE than state-governed LHDs. Qualitative themes highlighted challenges, including staff burnout, high turnover, pay inequities, role ambiguity, political pressures, rapid changes in informatics, and interoperability problems. Conclusions: Findings underscore the need to improve LHD workforce capacity and governance structures to support a resilient public health informatics infrastructure.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 40: Organizational Characteristics Associated with Health Information Systems Adoption in Local Health Departments During the COVID-19 Pandemic</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/40">doi: 10.3390/informatics13030040</a></p>
	<p>Authors:
		Nardeen Shafik
		Gulzar H. Shah
		Timothy C. McCall
		Bettye A. Apenteng
		Mansoor Abro
		William A. Mase
		</p>
	<p>Background: The COVID-19 pandemic revealed persistent gaps in local health department (LHD) health informatics capacity. This study examines organizational characteristics of LHDs associated with the adoption of six health information systems: electronic case reporting (eCR), electronic disease reporting systems (EDRS), electronic health records (EHR), electronic lab reporting (ELR), health information exchange (HIE), and immunization registries (IR). Methods: We used a mixed-methods design, including multinomial or binary logistic regression analyses of quantitative data from the 2022 NACCHO National Profile of Local Health Departments (n = 441) and thematic analysis of semi-structured interviews with five LHD staff members. Results: About half (49.9%) of LHDs had implemented eCR, while higher proportions had implemented EDRS (78.0%), EHR (62.4%), ELR (57.2%), HIE (92.6%), and IR (92.6%). Workforce size was associated with the implementation of eCR, EHR, and IR. The number of vacant staff positions was associated with a lower odds of IR implementation; compared with medium-sized LHDs, both small and large LHDs had higher odds of IR implementation. Shared-governance LHDs had higher odds of adopting ELR and HIE than state-governed LHDs. Qualitative themes highlighted challenges, including staff burnout, high turnover, pay inequities, role ambiguity, political pressures, rapid changes in informatics, and interoperability problems. Conclusions: Findings underscore the need to improve LHD workforce capacity and governance structures to support a resilient public health informatics infrastructure.</p>
	]]></content:encoded>

	<dc:title>Organizational Characteristics Associated with Health Information Systems Adoption in Local Health Departments During the COVID-19 Pandemic</dc:title>
			<dc:creator>Nardeen Shafik</dc:creator>
			<dc:creator>Gulzar H. Shah</dc:creator>
			<dc:creator>Timothy C. McCall</dc:creator>
			<dc:creator>Bettye A. Apenteng</dc:creator>
			<dc:creator>Mansoor Abro</dc:creator>
			<dc:creator>William A. Mase</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030040</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/informatics13030040</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/39">

	<title>Informatics, Vol. 13, Pages 39: Integrating Agentic Artificial Intelligence to Automate International Classification of Diseases, Tenth Revision, Medical Coding</title>
	<link>https://www.mdpi.com/2227-9709/13/3/39</link>
	<description>Automating ICD-10 coding from discharge summaries remains demanding because coders analyze clinical narratives while justifying decisions. This study compares three automation patterns: PLM-ICD as a standalone deep learning system emitting 15 codes per case, LLM-only generation with full autonomy, and a hybrid approach where PLM-ICD drafts candidates for an agentic LLM audit to accept or reject. All strategies were evaluated on 19,801 MIMIC-IV summaries using four LLMs spanning compact (Qwen2.5-3B-Instruct, Llama-3.2-3B-Instruct, Phi-4-mini-instruct) to large-scale (Sonnet-4.5). Precision guided evaluation because coders still supply any missing diagnoses. PLM-ICD alone reached 55.8% precision while always surfacing 15 suggestions. LLM-only generation lagged severely (1.5&amp;ndash;34.6% precision) and produced inconsistent output sizes. The agentic audit delivered the best trade-off: compact LLMs reviewed the 15 candidates, discarded weak evidence, and returned 2&amp;ndash;8 high-confidence codes. Llama-3.2-3B-Instruct, for example, improved from 1.5% as a generator to 55.1% as a verifier while trimming false positives by 73%. These results show that positioning LLMs as quality controllers, rather than primary generators, yields reliable support for clinical coding teams, while formal recall/F1 reporting remains future work for fully autonomous implementations.</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 39: Integrating Agentic Artificial Intelligence to Automate International Classification of Diseases, Tenth Revision, Medical Coding</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/39">doi: 10.3390/informatics13030039</a></p>
	<p>Authors:
		Kitti Akkhawatthanakun
		Lalita Narupiyakul
		Konlakorn Wongpatikaseree
		Narit Hnoohom
		Chakkrit Termritthikun
		Paisarn Muneesawang
		</p>
	<p>Automating ICD-10 coding from discharge summaries remains demanding because coders analyze clinical narratives while justifying decisions. This study compares three automation patterns: PLM-ICD as a standalone deep learning system emitting 15 codes per case, LLM-only generation with full autonomy, and a hybrid approach where PLM-ICD drafts candidates for an agentic LLM audit to accept or reject. All strategies were evaluated on 19,801 MIMIC-IV summaries using four LLMs spanning compact (Qwen2.5-3B-Instruct, Llama-3.2-3B-Instruct, Phi-4-mini-instruct) to large-scale (Sonnet-4.5). Precision guided evaluation because coders still supply any missing diagnoses. PLM-ICD alone reached 55.8% precision while always surfacing 15 suggestions. LLM-only generation lagged severely (1.5&amp;ndash;34.6% precision) and produced inconsistent output sizes. The agentic audit delivered the best trade-off: compact LLMs reviewed the 15 candidates, discarded weak evidence, and returned 2&amp;ndash;8 high-confidence codes. Llama-3.2-3B-Instruct, for example, improved from 1.5% as a generator to 55.1% as a verifier while trimming false positives by 73%. These results show that positioning LLMs as quality controllers, rather than primary generators, yields reliable support for clinical coding teams, while formal recall/F1 reporting remains future work for fully autonomous implementations.</p>
	]]></content:encoded>

	<dc:title>Integrating Agentic Artificial Intelligence to Automate International Classification of Diseases, Tenth Revision, Medical Coding</dc:title>
			<dc:creator>Kitti Akkhawatthanakun</dc:creator>
			<dc:creator>Lalita Narupiyakul</dc:creator>
			<dc:creator>Konlakorn Wongpatikaseree</dc:creator>
			<dc:creator>Narit Hnoohom</dc:creator>
			<dc:creator>Chakkrit Termritthikun</dc:creator>
			<dc:creator>Paisarn Muneesawang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030039</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/informatics13030039</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/38">

	<title>Informatics, Vol. 13, Pages 38: Learning to Live with Gen-AI</title>
	<link>https://www.mdpi.com/2227-9709/13/3/38</link>
	<description>In 2023, in the wake of the launch of ChatGPT, based on GPT-3, we invited contributions on the Topic AI chatbots: threat or opportunity [...]</description>
	<pubDate>2026-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 38: Learning to Live with Gen-AI</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/38">doi: 10.3390/informatics13030038</a></p>
	<p>Authors:
		Antony Bryant
		</p>
	<p>In 2023, in the wake of the launch of ChatGPT, based on GPT-3, we invited contributions on the Topic AI chatbots: threat or opportunity [...]</p>
	]]></content:encoded>

	<dc:title>Learning to Live with Gen-AI</dc:title>
			<dc:creator>Antony Bryant</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030038</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-04</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Editorial</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/informatics13030038</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/37">

	<title>Informatics, Vol. 13, Pages 37: Enhancing Causal Text Detection Using Uncertainty-Weighted Machine Learning Ensembles</title>
	<link>https://www.mdpi.com/2227-9709/13/3/37</link>
	<description>Causal inference in text data has been a demanding objective in the field of natural language processing, mainly due to the intrinsic ambiguity and context sensitivity inherent in data, inducing uncertainty. Diminishing this uncertainty is essential in identifying reliable causal connections and advancing predictive consistency. In this research, we introduce an uncertainty-aware ensemble architecture that combines multiple text embedding schemes with both linear and nonlinear classifiers to boost causal text detection. Both sparse and neural-level embeddings were employed, and then combined with an ensemble weighting approach based on two uncertainty estimation techniques, namely entropy-based and KL divergence-based. Unlike conventional ensemble methods with uniform or fixed voting strategies, our approach assigns weights inversely proportional to classifier uncertainty, ensuring that confident models exert greater influence on the final decisions. Our results show that TF-IDF, through its effective word frequency weighting scheme, consistently outperforms other embedding techniques, achieving better performance across both linear and nonlinear classifiers on both datasets (News Corpus and CausalLM&amp;ndash;Adjective group). The experimental results show that our uncertainty-aware ensemble approach enhances both calibration and confidence predictions. Entropy-based weighting improves confidence in the case of linear classifiers with accuracy, F1-score, entropy and prediction confidence values of 94.3%, 94.0%, 0.382 and 0.774, respectively, while in the case of nonlinear classifiers the KL divergence-based weighting acquires a better performance with an accuracy of 97.6%, F1-score of 97.2%, KL Mean value of around 0.055 and LogLoss of 0.221.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 37: Enhancing Causal Text Detection Using Uncertainty-Weighted Machine Learning Ensembles</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/37">doi: 10.3390/informatics13030037</a></p>
	<p>Authors:
		Sivachandra K B
		Neethu Mohan
		Mithun Kumar Kar
		Sikha O K
		Sachin Kumar S
		</p>
	<p>Causal inference in text data has been a demanding objective in the field of natural language processing, mainly due to the intrinsic ambiguity and context sensitivity inherent in data, inducing uncertainty. Diminishing this uncertainty is essential in identifying reliable causal connections and advancing predictive consistency. In this research, we introduce an uncertainty-aware ensemble architecture that combines multiple text embedding schemes with both linear and nonlinear classifiers to boost causal text detection. Both sparse and neural-level embeddings were employed, and then combined with an ensemble weighting approach based on two uncertainty estimation techniques, namely entropy-based and KL divergence-based. Unlike conventional ensemble methods with uniform or fixed voting strategies, our approach assigns weights inversely proportional to classifier uncertainty, ensuring that confident models exert greater influence on the final decisions. Our results show that TF-IDF, through its effective word frequency weighting scheme, consistently outperforms other embedding techniques, achieving better performance across both linear and nonlinear classifiers on both datasets (News Corpus and CausalLM&amp;ndash;Adjective group). The experimental results show that our uncertainty-aware ensemble approach enhances both calibration and confidence predictions. Entropy-based weighting improves confidence in the case of linear classifiers with accuracy, F1-score, entropy and prediction confidence values of 94.3%, 94.0%, 0.382 and 0.774, respectively, while in the case of nonlinear classifiers the KL divergence-based weighting acquires a better performance with an accuracy of 97.6%, F1-score of 97.2%, KL Mean value of around 0.055 and LogLoss of 0.221.</p>
	]]></content:encoded>

	<dc:title>Enhancing Causal Text Detection Using Uncertainty-Weighted Machine Learning Ensembles</dc:title>
			<dc:creator>Sivachandra K B</dc:creator>
			<dc:creator>Neethu Mohan</dc:creator>
			<dc:creator>Mithun Kumar Kar</dc:creator>
			<dc:creator>Sikha O K</dc:creator>
			<dc:creator>Sachin Kumar S</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030037</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/informatics13030037</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/36">

	<title>Informatics, Vol. 13, Pages 36: Mapping 3D Digital Heritage at Scale: A ChatGPT-Assisted Analysis of Sketchfab&amp;rsquo;s &amp;ldquo;Cultural Heritage &amp;amp; History&amp;rdquo; Models</title>
	<link>https://www.mdpi.com/2227-9709/13/3/36</link>
	<description>This paper evaluates the platform-mediated importance and impact of 3D cultural heritage models stored on Sketchfab by analyzing user engagement and retention metrics (views, likes, and comments), and provides a comparative assessment across other major 3D platforms. Our primary goal is to understand how cultural heritage content performs in terms of reach, engagement, and reuse conditions, and how platform design and taxonomies shape what becomes visible and measurable. We map Sketchfab&amp;rsquo;s Cultural Heritage &amp;amp; History ecosystem through a reproducible, API-driven workflow built on public metadata for over 1.37 million models (views, likes, comments, tags, and licences). The results depict a domain in rapid expansion between 2018 and 2025, while also revealing a strongly unequal attention economy: most models receive limited interaction, whereas a small minority concentrates visibility and engagement. The category Cultural Heritage &amp;amp; History shows high endorsement relative to reach, consistent with &amp;ldquo;high-value&amp;rdquo; engagement once content is discovered. Methodologically, large-scale harvesting required automation to manage cursor pagination, intermittent failures, and rate limits (e.g., HTTP 429). In this context, ChatGPT provided essential support by assisting the design and refinement of the extraction and counting algorithm, replacing what would otherwise have required extensive manual counting and verification at a scale that could plausibly take months.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 36: Mapping 3D Digital Heritage at Scale: A ChatGPT-Assisted Analysis of Sketchfab&amp;rsquo;s &amp;ldquo;Cultural Heritage &amp;amp; History&amp;rdquo; Models</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/36">doi: 10.3390/informatics13030036</a></p>
	<p>Authors:
		Massimiliano Pepe
		Andrei Crisan
		Emmanuel Maravelakis
		Donato Palumbo
		Ahmed Kamal Hamed Dewedar
		Przemysław Klapa
		</p>
	<p>This paper evaluates the platform-mediated importance and impact of 3D cultural heritage models stored on Sketchfab by analyzing user engagement and retention metrics (views, likes, and comments), and provides a comparative assessment across other major 3D platforms. Our primary goal is to understand how cultural heritage content performs in terms of reach, engagement, and reuse conditions, and how platform design and taxonomies shape what becomes visible and measurable. We map Sketchfab&amp;rsquo;s Cultural Heritage &amp;amp; History ecosystem through a reproducible, API-driven workflow built on public metadata for over 1.37 million models (views, likes, comments, tags, and licences). The results depict a domain in rapid expansion between 2018 and 2025, while also revealing a strongly unequal attention economy: most models receive limited interaction, whereas a small minority concentrates visibility and engagement. The category Cultural Heritage &amp;amp; History shows high endorsement relative to reach, consistent with &amp;ldquo;high-value&amp;rdquo; engagement once content is discovered. Methodologically, large-scale harvesting required automation to manage cursor pagination, intermittent failures, and rate limits (e.g., HTTP 429). In this context, ChatGPT provided essential support by assisting the design and refinement of the extraction and counting algorithm, replacing what would otherwise have required extensive manual counting and verification at a scale that could plausibly take months.</p>
	]]></content:encoded>

	<dc:title>Mapping 3D Digital Heritage at Scale: A ChatGPT-Assisted Analysis of Sketchfab&amp;rsquo;s &amp;ldquo;Cultural Heritage &amp;amp; History&amp;rdquo; Models</dc:title>
			<dc:creator>Massimiliano Pepe</dc:creator>
			<dc:creator>Andrei Crisan</dc:creator>
			<dc:creator>Emmanuel Maravelakis</dc:creator>
			<dc:creator>Donato Palumbo</dc:creator>
			<dc:creator>Ahmed Kamal Hamed Dewedar</dc:creator>
			<dc:creator>Przemysław Klapa</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030036</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/informatics13030036</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/35">

	<title>Informatics, Vol. 13, Pages 35: Ontology-Based Digital Preservation Framework for Phum Riang Silk Heritage</title>
	<link>https://www.mdpi.com/2227-9709/13/3/35</link>
	<description>Traditional textile crafts face significant challenges in preserving and transferring knowledge due to the aging of expert artisans and declining community engagement. The Phum Riang silk-weaving tradition in Suratthani Province is a critical example of indigenous knowledge systems that require systematic documentation and digital conservation strategies. This research aims to develop a comprehensive ontological framework to support the capture, organization, and preservation of traditional knowledge related to Phum Riang silk production processes, establishing practical methodologies applicable to broader cultural heritage craft digitization and knowledge management systems. The research methodology employs ontology engineering principles, using the Web Ontology Language to create structured knowledge representation systems. Data collection was conducted through ethnographic fieldwork, in-depth interviews with expert craftspeople, and systematic documentation covering production processes, materials, tools, and cultural practices. The developed ontology encompasses five primary knowledge domains: production processes, raw materials, traditional tools, geographical context, and cultural significance. The framework comprises 23 distinct classes organized in hierarchical structures, 15 object properties, and 12 data properties, complemented by business rules ensuring authenticity and quality control mechanisms. This framework has significant implications for cultural heritage digitization, indigenous intellectual property protection, systematic knowledge transfer across generations, cultural authenticity preservation, and traditional craft community economic sustainability.</description>
	<pubDate>2026-02-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 35: Ontology-Based Digital Preservation Framework for Phum Riang Silk Heritage</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/35">doi: 10.3390/informatics13030035</a></p>
	<p>Authors:
		A-Phorn Molee
		Thana Charuphanthuset
		Wittawat Kunnu
		Supaporn Chairungsee
		</p>
	<p>Traditional textile crafts face significant challenges in preserving and transferring knowledge due to the aging of expert artisans and declining community engagement. The Phum Riang silk-weaving tradition in Suratthani Province is a critical example of indigenous knowledge systems that require systematic documentation and digital conservation strategies. This research aims to develop a comprehensive ontological framework to support the capture, organization, and preservation of traditional knowledge related to Phum Riang silk production processes, establishing practical methodologies applicable to broader cultural heritage craft digitization and knowledge management systems. The research methodology employs ontology engineering principles, using the Web Ontology Language to create structured knowledge representation systems. Data collection was conducted through ethnographic fieldwork, in-depth interviews with expert craftspeople, and systematic documentation covering production processes, materials, tools, and cultural practices. The developed ontology encompasses five primary knowledge domains: production processes, raw materials, traditional tools, geographical context, and cultural significance. The framework comprises 23 distinct classes organized in hierarchical structures, 15 object properties, and 12 data properties, complemented by business rules ensuring authenticity and quality control mechanisms. This framework has significant implications for cultural heritage digitization, indigenous intellectual property protection, systematic knowledge transfer across generations, cultural authenticity preservation, and traditional craft community economic sustainability.</p>
	]]></content:encoded>

	<dc:title>Ontology-Based Digital Preservation Framework for Phum Riang Silk Heritage</dc:title>
			<dc:creator>A-Phorn Molee</dc:creator>
			<dc:creator>Thana Charuphanthuset</dc:creator>
			<dc:creator>Wittawat Kunnu</dc:creator>
			<dc:creator>Supaporn Chairungsee</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030035</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-27</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/informatics13030035</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/3/34">

	<title>Informatics, Vol. 13, Pages 34: Personalized Canine Diet Generation Using Machine Learning and Constraint Optimization</title>
	<link>https://www.mdpi.com/2227-9709/13/3/34</link>
	<description>The growing demand for customized pet diets highlights the shortcomings of commercial dog foods designed for all breeds, especially when it comes to addressing breed-specific diseases, metabolic disorders, and health risks. This research presents the development and evaluation of a hybrid system for formulating wet canine food recipes. The system combines data on ingredients, veterinary feeds, and breed-related diseases; the architecture includes a recommendation module for ingredient selection and a linear programming block for recipe optimization, considering veterinary nutrient restrictions. The evaluation of the system included automatic classification of foods by specialization, visual analysis of recipe clustering, and comparison of formulas obtained by different models. The average precision of label recovery was 85.4% for TF-IDF and 88.2% for the E5 model. A comparison of ingredient extraction methods showed that machine learning produces more stable recipes, while the statistical approach provides greater variability. The developed system demonstrates potential for automating recipe creation, filling in missing data, and developing veterinary decision support platforms aimed at personalized diet selection based on the physiological needs of animals.</description>
	<pubDate>2026-02-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 34: Personalized Canine Diet Generation Using Machine Learning and Constraint Optimization</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/3/34">doi: 10.3390/informatics13030034</a></p>
	<p>Authors:
		Aliya Kalykulova
		Kuanysh Bakirov
		Aruzhan Shoman
		Kadyrzhan Makangali
		Gulzhan Tokysheva
		</p>
	<p>The growing demand for customized pet diets highlights the shortcomings of commercial dog foods designed for all breeds, especially when it comes to addressing breed-specific diseases, metabolic disorders, and health risks. This research presents the development and evaluation of a hybrid system for formulating wet canine food recipes. The system combines data on ingredients, veterinary feeds, and breed-related diseases; the architecture includes a recommendation module for ingredient selection and a linear programming block for recipe optimization, considering veterinary nutrient restrictions. The evaluation of the system included automatic classification of foods by specialization, visual analysis of recipe clustering, and comparison of formulas obtained by different models. The average precision of label recovery was 85.4% for TF-IDF and 88.2% for the E5 model. A comparison of ingredient extraction methods showed that machine learning produces more stable recipes, while the statistical approach provides greater variability. The developed system demonstrates potential for automating recipe creation, filling in missing data, and developing veterinary decision support platforms aimed at personalized diet selection based on the physiological needs of animals.</p>
	]]></content:encoded>

	<dc:title>Personalized Canine Diet Generation Using Machine Learning and Constraint Optimization</dc:title>
			<dc:creator>Aliya Kalykulova</dc:creator>
			<dc:creator>Kuanysh Bakirov</dc:creator>
			<dc:creator>Aruzhan Shoman</dc:creator>
			<dc:creator>Kadyrzhan Makangali</dc:creator>
			<dc:creator>Gulzhan Tokysheva</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13030034</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-25</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-25</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/informatics13030034</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/3/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/33">

	<title>Informatics, Vol. 13, Pages 33: Equity, Function, and Data: A Review of Social and Functional Representation in AI Datasets for Traumatic Brain Injury</title>
	<link>https://www.mdpi.com/2227-9709/13/2/33</link>
	<description>Traumatic brain injury (TBI) is a leading cause of long-term disability worldwide, and each person&amp;rsquo;s recovery looks different. Artificial intelligence (AI) offers promising tools to project individual outcomes. However, these models are impacted by the quality and inclusiveness of the dataset on which they are trained, having major implications for clinical value. This scoping review evaluated publicly available datasets that use AI modeling to predict outcomes from TBI. It examined how the literature derived from these datasets captures functional and social variables. Following PRISMA guidelines, 24 studies were identified, yielding 19 distinct datasets. While most datasets emphasized biomedical and injury severity metrics, few incorporated communication, cognition, and relevant social determinants of health. Nearly all studies included age and sex, but fewer than half reported race or ethnicity, and only a small subset integrated broader contextual indicators. Results suggest that outcome modeling continues to rely heavily on global scales, with limited use of domain-specific measurements. Another limiting factor is poor use of longitudinal measures, often not extending follow-up past the six-month post-injury time. These findings point to a need for inclusive, functionally rich, and ethically transparent data practices to aid AI systems in promoting equitable and clinically meaningful care.</description>
	<pubDate>2026-02-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 33: Equity, Function, and Data: A Review of Social and Functional Representation in AI Datasets for Traumatic Brain Injury</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/33">doi: 10.3390/informatics13020033</a></p>
	<p>Authors:
		Leslie W. Johnson
		Kellyn D. Hall
		</p>
	<p>Traumatic brain injury (TBI) is a leading cause of long-term disability worldwide, and each person&rsquo;s recovery looks different. Artificial intelligence (AI) offers promising tools to project individual outcomes. However, these models are impacted by the quality and inclusiveness of the dataset on which they are trained, having major implications for clinical value. This scoping review evaluated publicly available datasets that use AI modeling to predict outcomes from TBI. It examined how the literature derived from these datasets captures functional and social variables. Following PRISMA guidelines, 24 studies were identified, yielding 19 distinct datasets. While most datasets emphasized biomedical and injury severity metrics, few incorporated communication, cognition, and relevant social determinants of health. Nearly all studies included age and sex, but fewer than half reported race or ethnicity, and only a small subset integrated broader contextual indicators. Results suggest that outcome modeling continues to rely heavily on global scales, with limited use of domain-specific measurements. Another limiting factor is poor use of longitudinal measures, often not extending follow-up past the six-month post-injury time. These findings point to a need for inclusive, functionally rich, and ethically transparent data practices to aid AI systems in promoting equitable and clinically meaningful care.</p>
	]]></content:encoded>

	<dc:title>Equity, Function, and Data: A Review of Social and Functional Representation in AI Datasets for Traumatic Brain Injury</dc:title>
			<dc:creator>Leslie W. Johnson</dc:creator>
			<dc:creator>Kellyn D. Hall</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020033</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-13</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-13</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/informatics13020033</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/32">

	<title>Informatics, Vol. 13, Pages 32: A Model-Driven Engineering Approach to AI-Powered Healthcare Platforms</title>
	<link>https://www.mdpi.com/2227-9709/13/2/32</link>
	<description>Artificial intelligence (AI) has the potential to transform healthcare by supporting more accurate diagnoses and personalized treatments. However, its adoption in practice remains constrained by fragmented data sources, strict privacy rules, and the technical complexity of building reliable clinical systems. To address these challenges, we introduce a model-driven engineering (MDE) framework designed specifically for healthcare AI. The framework relies on formal metamodels, domain-specific languages (DSLs), and automated transformations to move from high-level specifications to running software. At its core is the Medical Interoperability Language (MILA), a graphical DSL that enables clinicians and data scientists to define queries and machine learning pipelines using shared ontologies. When combined with a federated learning architecture, MILA allows institutions to collaborate without exchanging raw patient data, ensuring semantic consistency across sites while preserving privacy. We evaluate this approach in a multi-center cancer immunotherapy study. The generated pipelines delivered strong predictive performance, with best-performing models achieving up to 98.5% accuracy on selected prediction tasks, while substantially reducing manual coding effort. These findings suggest that MDE principles&amp;mdash;metamodeling, semantic integration, and automated code generation&amp;mdash;can provide a practical path toward interoperable, reproducible, and reliable digital health platforms.</description>
	<pubDate>2026-02-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 32: A Model-Driven Engineering Approach to AI-Powered Healthcare Platforms</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/32">doi: 10.3390/informatics13020032</a></p>
	<p>Authors:
		Mira Raheem
		Neamat Eltazi
		Michael Papazoglou
		Bernd Krämer
		Amal Elgammal
		</p>
	<p>Artificial intelligence (AI) has the potential to transform healthcare by supporting more accurate diagnoses and personalized treatments. However, its adoption in practice remains constrained by fragmented data sources, strict privacy rules, and the technical complexity of building reliable clinical systems. To address these challenges, we introduce a model-driven engineering (MDE) framework designed specifically for healthcare AI. The framework relies on formal metamodels, domain-specific languages (DSLs), and automated transformations to move from high-level specifications to running software. At its core is the Medical Interoperability Language (MILA), a graphical DSL that enables clinicians and data scientists to define queries and machine learning pipelines using shared ontologies. When combined with a federated learning architecture, MILA allows institutions to collaborate without exchanging raw patient data, ensuring semantic consistency across sites while preserving privacy. We evaluate this approach in a multi-center cancer immunotherapy study. The generated pipelines delivered strong predictive performance, with best-performing models achieving up to 98.5% accuracy on selected prediction tasks, while substantially reducing manual coding effort. These findings suggest that MDE principles&mdash;metamodeling, semantic integration, and automated code generation&mdash;can provide a practical path toward interoperable, reproducible, and reliable digital health platforms.</p>
	]]></content:encoded>

	<dc:title>A Model-Driven Engineering Approach to AI-Powered Healthcare Platforms</dc:title>
			<dc:creator>Mira Raheem</dc:creator>
			<dc:creator>Neamat Eltazi</dc:creator>
			<dc:creator>Michael Papazoglou</dc:creator>
			<dc:creator>Bernd Krämer</dc:creator>
			<dc:creator>Amal Elgammal</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020032</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-11</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-11</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/informatics13020032</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/31">

	<title>Informatics, Vol. 13, Pages 31: LinguoNER: A Language-Agnostic Framework for Named Entity Recognition in Low-Resource Languages with a Focus on Yambeta</title>
	<link>https://www.mdpi.com/2227-9709/13/2/31</link>
	<description>This paper presents LinguoNER, a practical and extensible framework for bootstrapping Named Entity Recognition (NER) in extremely low-resource languages, demonstrated on Yambeta, a Bantu language spoken by a minority community in Cameroon. Due to scarce digital resources and the absence of annotated corpora, Yambeta has remained largely underrepresented in Natural Language Processing (NLP). LinguoNER addresses this gap by providing a methodologically transparent end-to-end workflow that integrates corpus acquisition, gazetteer-driven automatic annotation, tokenizer training, transformer fine-tuning, and multi-level evaluation in settings where large-scale manual annotation is infeasible. Using a Bible-derived corpus as a linguistically stable starting point, we release the first publicly available Yambeta NER dataset (&amp;asymp;25,000 tokens) annotated with the CoNLL BIO scheme and a restricted entity schema (PER/LOC/ORG). Because labels are generated via dictionary-based annotation, the corpus is best characterized as silver-standard; credibility is strengthened through recorded dictionaries, transparency logs, expert-in-the-loop validation on sampled subsets, and complementary qualitative error analysis. We additionally train a dedicated Yambeta WordPiece tokenizer that preserves tone markers and diacritics, and fine-tune a bert-base-cased transformer for token classification. On a held-out test split, LinguoNER achieves strong token-level performance (Precision = 0.989, Recall = 0.981, F1 = 0.985), substantially outperforming a dictionary-only gazetteer baseline (&amp;Delta;F1 &amp;asymp; 0.36). Per-entity-type evaluation further indicates improvements beyond surface-form matching, while remaining errors are linguistically motivated and primarily involve multi-word entity boundaries, agglutinative constructions, and tone-/diacritic-sensitive tokenization. 
We emphasize that results are restricted to a Bible domain and a limited label space, and should be interpreted as proof-of-concept evidence rather than claims of broad out-of-domain generalization. Overall, LinguoNER provides a reproducible blueprint for bootstrapping NER resources in underrepresented languages and supports future work on broader corpora sources (e.g., news, OPUS, JW300), additional African languages (e.g., Yoruba, Igbo, Bassa), and the iterative creation of expert-refined datasets and gold-standard subsets.</description>
	<pubDate>2026-02-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 31: LinguoNER: A Language-Agnostic Framework for Named Entity Recognition in Low-Resource Languages with a Focus on Yambeta</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/31">doi: 10.3390/informatics13020031</a></p>
	<p>Authors:
		Philippe Tamla
		Stephane Donna
		Tobias Bigala
		Dilan Nde
		Maxime Yves Julien Manifi Abouh
		Florian Freund
		</p>
	<p>This paper presents LinguoNER, a practical and extensible framework for bootstrapping Named Entity Recognition (NER) in extremely low-resource languages, demonstrated on Yambeta, a Bantu language spoken by a minority community in Cameroon. Due to scarce digital resources and the absence of annotated corpora, Yambeta has remained largely underrepresented in Natural Language Processing (NLP). LinguoNER addresses this gap by providing a methodologically transparent end-to-end workflow that integrates corpus acquisition, gazetteer-driven automatic annotation, tokenizer training, transformer fine-tuning, and multi-level evaluation in settings where large-scale manual annotation is infeasible. Using a Bible-derived corpus as a linguistically stable starting point, we release the first publicly available Yambeta NER dataset (&asymp;25,000 tokens) annotated with the CoNLL BIO scheme and a restricted entity schema (PER/LOC/ORG). Because labels are generated via dictionary-based annotation, the corpus is best characterized as silver-standard; credibility is strengthened through recorded dictionaries, transparency logs, expert-in-the-loop validation on sampled subsets, and complementary qualitative error analysis. We additionally train a dedicated Yambeta WordPiece tokenizer that preserves tone markers and diacritics, and fine-tune a bert-base-cased transformer for token classification. On a held-out test split, LinguoNER achieves strong token-level performance (Precision = 0.989, Recall = 0.981, F1 = 0.985), substantially outperforming a dictionary-only gazetteer baseline (&Delta;F1 &asymp; 0.36). Per-entity-type evaluation further indicates improvements beyond surface-form matching, while remaining errors are linguistically motivated and primarily involve multi-word entity boundaries, agglutinative constructions, and tone-/diacritic-sensitive tokenization. 
We emphasize that results are restricted to a Bible domain and a limited label space, and should be interpreted as proof-of-concept evidence rather than claims of broad out-of-domain generalization. Overall, LinguoNER provides a reproducible blueprint for bootstrapping NER resources in underrepresented languages and supports future work on broader corpora sources (e.g., news, OPUS, JW300), additional African languages (e.g., Yoruba, Igbo, Bassa), and the iterative creation of expert-refined datasets and gold-standard subsets.</p>
	]]></content:encoded>

	<dc:title>LinguoNER: A Language-Agnostic Framework for Named Entity Recognition in Low-Resource Languages with a Focus on Yambeta</dc:title>
			<dc:creator>Philippe Tamla</dc:creator>
			<dc:creator>Stephane Donna</dc:creator>
			<dc:creator>Tobias Bigala</dc:creator>
			<dc:creator>Dilan Nde</dc:creator>
			<dc:creator>Maxime Yves Julien Manifi Abouh</dc:creator>
			<dc:creator>Florian Freund</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020031</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-11</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-11</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/informatics13020031</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/30">

	<title>Informatics, Vol. 13, Pages 30: HierFinRAG&amp;mdash;Hierarchical Multimodal RAG for Financial Document Understanding</title>
	<link>https://www.mdpi.com/2227-9709/13/2/30</link>
	<description>Financial document understanding remains a critical challenge for Large Language Models, primarily due to the complex interplay between narrative text and structured numerical tables. Existing Retrieval-Augmented Generation (RAG) systems often treat these modalities in isolation, leading to significant failures in tasks requiring joint reasoning. This study introduces HierFinRAG, a novel hierarchical multimodal framework designed to unify tabular and textual data processing. Our approach employs a Table-Text Graph Neural Network (TTGNN) to explicitly model semantic and structural dependencies between table cells and corresponding text, coupled with a Symbolic&amp;ndash;Neural Fusion module that routes queries between a neural generator and a symbolic calculator for precise arithmetic operations. We evaluate the system on the FinQA and FinanceBench datasets, comparing performance against strong baselines including Vanilla RAG and GPT-4o with Code Interpreter. Results demonstrate that HierFinRAG achieves an Exact Match score of 82.5% on FinQA, surpassing the best baseline by 6.5 percentage points, while maintaining a 3.5&amp;times; faster inference latency than agentic approaches. These findings indicate that integrating hierarchical structural awareness with hybrid reasoning significantly enhances the accuracy and interpretability of financial artificial intelligence systems.</description>
	<pubDate>2026-02-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 30: HierFinRAG&mdash;Hierarchical Multimodal RAG for Financial Document Understanding</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/30">doi: 10.3390/informatics13020030</a></p>
	<p>Authors:
		Quang-Vinh Dang
		Ngoc-Son-An Nguyen
		Thi-Bich-Diem Vo
		</p>
	<p>Financial document understanding remains a critical challenge for Large Language Models, primarily due to the complex interplay between narrative text and structured numerical tables. Existing Retrieval-Augmented Generation (RAG) systems often treat these modalities in isolation, leading to significant failures in tasks requiring joint reasoning. This study introduces HierFinRAG, a novel hierarchical multimodal framework designed to unify tabular and textual data processing. Our approach employs a Table-Text Graph Neural Network (TTGNN) to explicitly model semantic and structural dependencies between table cells and corresponding text, coupled with a Symbolic&ndash;Neural Fusion module that routes queries between a neural generator and a symbolic calculator for precise arithmetic operations. We evaluate the system on the FinQA and FinanceBench datasets, comparing performance against strong baselines including Vanilla RAG and GPT-4o with Code Interpreter. Results demonstrate that HierFinRAG achieves an Exact Match score of 82.5% on FinQA, surpassing the best baseline by 6.5 percentage points, while maintaining a 3.5&times; faster inference latency than agentic approaches. These findings indicate that integrating hierarchical structural awareness with hybrid reasoning significantly enhances the accuracy and interpretability of financial artificial intelligence systems.</p>
	]]></content:encoded>

	<dc:title>HierFinRAG&amp;mdash;Hierarchical Multimodal RAG for Financial Document Understanding</dc:title>
			<dc:creator>Quang-Vinh Dang</dc:creator>
			<dc:creator>Ngoc-Son-An Nguyen</dc:creator>
			<dc:creator>Thi-Bich-Diem Vo</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020030</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-10</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-10</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/informatics13020030</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/29">

	<title>Informatics, Vol. 13, Pages 29: CPG-EVAL: Evaluating the Readiness of Large Language Models as Assistants and Teammates in Language Teaching</title>
	<link>https://www.mdpi.com/2227-9709/13/2/29</link>
	<description>Large language models (LLMs) have begun to function as assistants or teammates in language learning, teaching, and research. However, what prerequisites are required for LLMs to reliably play these roles, and how such prerequisites should be measured, remains under-discussed. This study focuses on measuring Pedagogical Grammar Pattern Recognition (P-GPR) and establishes the Chinese Pedagogical Grammar Evaluation (CPG-EVAL), a multi-tiered benchmark designed to evaluate P-GPR within International Chinese Language Education. CPG-EVAL operationalizes grammar&amp;ndash;instance correspondence through five task types that progressively increase contextual load and interference. We evaluate multiple proprietary and open-source LLMs as well as human participants. Results show a monotonic ordering across groups (humans &amp;gt; larger-scale models &amp;gt; semi-larger-scale models &amp;gt; smaller-scale models). In comparison with human participants, LLM performance is more sensitive to task-format complexity. In addition, we identify a set of completely failed items that consistently mislead all evaluated LLMs, exposing shared and systematic weaknesses in current models&amp;rsquo; pedagogical grammar recognition. Overall, this study provides an operational framework for diagnosing the capabilities and risks of LLMs when they are deployed as assistants or teammates in grammar-related language-education tasks and offers empirical reference for safer and more syllabus-aligned use of LLMs in educational settings.</description>
	<pubDate>2026-02-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 29: CPG-EVAL: Evaluating the Readiness of Large Language Models as Assistants and Teammates in Language Teaching</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/29">doi: 10.3390/informatics13020029</a></p>
	<p>Authors:
		Dong Wang
		</p>
	<p>Large language models (LLMs) have begun to function as assistants or teammates in language learning, teaching, and research. However, what prerequisites are required for LLMs to reliably play these roles, and how such prerequisites should be measured, remains under-discussed. This study focuses on measuring Pedagogical Grammar Pattern Recognition (P-GPR) and establishes the Chinese Pedagogical Grammar Evaluation (CPG-EVAL), a multi-tiered benchmark designed to evaluate P-GPR within International Chinese Language Education. CPG-EVAL operationalizes grammar&ndash;instance correspondence through five task types that progressively increase contextual load and interference. We evaluate multiple proprietary and open-source LLMs as well as human participants. Results show a monotonic ordering across groups (humans &gt; larger-scale models &gt; semi-larger-scale models &gt; smaller-scale models). In comparison with human participants, LLM performance is more sensitive to task-format complexity. In addition, we identify a set of completely failed items that consistently mislead all evaluated LLMs, exposing shared and systematic weaknesses in current models&rsquo; pedagogical grammar recognition. Overall, this study provides an operational framework for diagnosing the capabilities and risks of LLMs when they are deployed as assistants or teammates in grammar-related language-education tasks and offers empirical reference for safer and more syllabus-aligned use of LLMs in educational settings.</p>
	]]></content:encoded>

	<dc:title>CPG-EVAL: Evaluating the Readiness of Large Language Models as Assistants and Teammates in Language Teaching</dc:title>
			<dc:creator>Dong Wang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020029</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-06</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-06</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/informatics13020029</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/28">

	<title>Informatics, Vol. 13, Pages 28: Using Process Mining Techniques to Enhance the Patient Journey in an Oncology Clinic</title>
	<link>https://www.mdpi.com/2227-9709/13/2/28</link>
	<description>The cancer care pathway comprises several stages encompassing diagnosis, treatment, and follow-up. Studies show that delays in treatment initiation are associated with worse outcomes, including increased mortality, reduced progression-free survival, and diminished post-treatment quality of life. To address this, patient navigation tools have emerged as a strategy to identify bottlenecks and mitigate delays. In this context, process mining offers a promising approach to discover, model, and optimize workflows using real data from hospital information systems. This paper presents a case study on the application of process mining to analyze care pathways in an oncology clinic. The focus was on identifying critical pathways and delays in the treatment journey to support the patient navigation program. Based on the insights gained, targeted improvement actions were proposed to enhance the patient journey. Using the PM2 methodology, event data were extracted and processed from the clinic&amp;rsquo;s information systems to model and analyze two key processes: (i) departmental workflows related to ambulatory care and (ii) longitudinal treatment pathways from initial evaluation to discharge. The results confirm the value of process mining for improving oncology patient journey and highlight its potential as a decision-support tool for healthcare administrators and clinical leaders.</description>
	<pubDate>2026-02-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 28: Using Process Mining Techniques to Enhance the Patient Journey in an Oncology Clinic</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/28">doi: 10.3390/informatics13020028</a></p>
	<p>Authors:
		Ricardo S. Santos
		Jaqueline B. Braz
		Michelle Capelli
		Alvaro O. I. Rodrigues
		José M. Parente de Oliveira
		</p>
	<p>The cancer care pathway comprises several stages encompassing diagnosis, treatment, and follow-up. Studies show that delays in treatment initiation are associated with worse outcomes, including increased mortality, reduced progression-free survival, and diminished post-treatment quality of life. To address this, patient navigation tools have emerged as a strategy to identify bottlenecks and mitigate delays. In this context, process mining offers a promising approach to discover, model, and optimize workflows using real data from hospital information systems. This paper presents a case study on the application of process mining to analyze care pathways in an oncology clinic. The focus was on identifying critical pathways and delays in the treatment journey to support the patient navigation program. Based on the insights gained, targeted improvement actions were proposed to enhance the patient journey. Using the PM2 methodology, event data were extracted and processed from the clinic&rsquo;s information systems to model and analyze two key processes: (i) departmental workflows related to ambulatory care and (ii) longitudinal treatment pathways from initial evaluation to discharge. The results confirm the value of process mining for improving oncology patient journey and highlight its potential as a decision-support tool for healthcare administrators and clinical leaders.</p>
	]]></content:encoded>

	<dc:title>Using Process Mining Techniques to Enhance the Patient Journey in an Oncology Clinic</dc:title>
			<dc:creator>Ricardo S. Santos</dc:creator>
			<dc:creator>Jaqueline B. Braz</dc:creator>
			<dc:creator>Michelle Capelli</dc:creator>
			<dc:creator>Alvaro O. I. Rodrigues</dc:creator>
			<dc:creator>José M. Parente de Oliveira</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020028</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-05</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/informatics13020028</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/27">

	<title>Informatics, Vol. 13, Pages 27: Comparative Evaluation of LSTM and 3D CNN Models in a Hybrid System for IoT-Enabled Sign-to-Text Translation in Deaf Communities</title>
	<link>https://www.mdpi.com/2227-9709/13/2/27</link>
	<description>This paper presents a hybrid deep learning framework for real-time sign language recognition (SLR) tailored to Internet of Things (IoT)-enabled environments, enhancing accessibility for Deaf communities. The proposed system integrates a Long Short-Term Memory (LSTM) network for static gesture recognition and a 3D Convolutional Neural Network (3D CNN) for dynamic gesture recognition. Implemented on a Raspberry Pi device using MediaPipe for landmark extraction, the system supports low-latency, on-device inference suitable for resource-constrained edge computing. Experimental results demonstrate that the LSTM model achieves its highest stability and performance for static signs at 1000 training epochs, yielding an average F1-score of 0.938 and an accuracy of 86.67%. In contrast, at 2000 epochs, the model exhibits a catastrophic performance collapse (F1-score of 0.088) due to overfitting and weight instability, highlighting the necessity of careful training regulation. Despite this, the overall system achieves consistently high classification performance under controlled conditions. In contrast, the 3D CNN component maintains robust and consistent performance across all evaluated training phases (500&amp;ndash;2000 epochs), achieving up to 99.6% accuracy on dynamic signs. When deployed on a Raspberry Pi platform, the system achieves real-time performance with a frame rate of 12&amp;ndash;15 FPS and an average inference latency of approximately 65 ms per frame. The hybrid architecture effectively balances recognition accuracy with computational efficiency by routing static gestures to the LSTM and dynamic gestures to the 3D CNN. This work presents a detailed epoch-wise comparative analysis of model stability and computational feasibility, contributing a practical and scalable IoT-enabled solution for inclusive, real-time sign-to-text communication in intelligent environments.</description>
	<pubDate>2026-02-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 27: Comparative Evaluation of LSTM and 3D CNN Models in a Hybrid System for IoT-Enabled Sign-to-Text Translation in Deaf Communities</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/27">doi: 10.3390/informatics13020027</a></p>
	<p>Authors:
		Samar Mouti
		Hani Al Chalabi
		Mohammed Abushohada
		Samer Rihawi
		Sulafa Abdalla
		</p>
	<p>This paper presents a hybrid deep learning framework for real-time sign language recognition (SLR) tailored to Internet of Things (IoT)-enabled environments, enhancing accessibility for Deaf communities. The proposed system integrates a Long Short-Term Memory (LSTM) network for static gesture recognition and a 3D Convolutional Neural Network (3D CNN) for dynamic gesture recognition. Implemented on a Raspberry Pi device using MediaPipe for landmark extraction, the system supports low-latency, on-device inference suitable for resource-constrained edge computing. Experimental results demonstrate that the LSTM model achieves its highest stability and performance for static signs at 1000 training epochs, yielding an average F1-score of 0.938 and an accuracy of 86.67%. In contrast, at 2000 epochs, the model exhibits a catastrophic performance collapse (F1-score of 0.088) due to overfitting and weight instability, highlighting the necessity of careful training regulation. Despite this, the overall system achieves consistently high classification performance under controlled conditions. In contrast, the 3D CNN component maintains robust and consistent performance across all evaluated training phases (500&amp;ndash;2000 epochs), achieving up to 99.6% accuracy on dynamic signs. When deployed on a Raspberry Pi platform, the system achieves real-time performance with a frame rate of 12&amp;ndash;15 FPS and an average inference latency of approximately 65 ms per frame. The hybrid architecture effectively balances recognition accuracy with computational efficiency by routing static gestures to the LSTM and dynamic gestures to the 3D CNN. This work presents a detailed epoch-wise comparative analysis of model stability and computational feasibility, contributing a practical and scalable IoT-enabled solution for inclusive, real-time sign-to-text communication in intelligent environments.</p>
	]]></content:encoded>

	<dc:title>Comparative Evaluation of LSTM and 3D CNN Models in a Hybrid System for IoT-Enabled Sign-to-Text Translation in Deaf Communities</dc:title>
			<dc:creator>Samar Mouti</dc:creator>
			<dc:creator>Hani Al Chalabi</dc:creator>
			<dc:creator>Mohammed Abushohada</dc:creator>
			<dc:creator>Samer Rihawi</dc:creator>
			<dc:creator>Sulafa Abdalla</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020027</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-05</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/informatics13020027</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/26">

	<title>Informatics, Vol. 13, Pages 26: Bridging Europe&amp;rsquo;s Digital Divide: Macro-Digital Preconditions for Sustainable LLM Adoption in Retail</title>
	<link>https://www.mdpi.com/2227-9709/13/2/26</link>
	<description>The deployment of large language models (LLMs) in commercial environments depends critically on the availability of robust digital infrastructure, scalable computing resources, and mature cloud architectures. This study examines how macro-level digital infrastructure, in particular cloud computing adoption, conditions the ability of the European retail sector to deploy and benefit from large language models (LLMs). Using a country-year panel of EU member states from 2017 to 2023, we estimate fixed-effects regressions to quantify the association between enterprise cloud use and retail trade volume growth, and implement an event-study design to explore dynamic responses around changes in cloud uptake. The results show that increases in cloud adoption are significantly associated with higher retail trade growth added and productivity, with especially strong effects in emerging Eastern European markets. We identify a digital threshold of around 20% of enterprises using cloud services, above which the marginal impact on retail performance becomes notably larger. These findings highlight cloud infrastructure as a key enabling condition for LLM-enabled retail applications and inform EU digital and industrial policy targeting regional digital disparities.</description>
	<pubDate>2026-02-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 26: Bridging Europe&amp;rsquo;s Digital Divide: Macro-Digital Preconditions for Sustainable LLM Adoption in Retail</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/26">doi: 10.3390/informatics13020026</a></p>
	<p>Authors:
		Mieta Bobanović Dasko
		</p>
	<p>The deployment of large language models (LLMs) in commercial environments depends critically on the availability of robust digital infrastructure, scalable computing resources, and mature cloud architectures. This study examines how macro-level digital infrastructure, in particular cloud computing adoption, conditions the ability of the European retail sector to deploy and benefit from large language models (LLMs). Using a country-year panel of EU member states from 2017 to 2023, we estimate fixed-effects regressions to quantify the association between enterprise cloud use and retail trade volume growth, and implement an event-study design to explore dynamic responses around changes in cloud uptake. The results show that increases in cloud adoption are significantly associated with higher retail trade growth added and productivity, with especially strong effects in emerging Eastern European markets. We identify a digital threshold of around 20% of enterprises using cloud services, above which the marginal impact on retail performance becomes notably larger. These findings highlight cloud infrastructure as a key enabling condition for LLM-enabled retail applications and inform EU digital and industrial policy targeting regional digital disparities.</p>
	]]></content:encoded>

	<dc:title>Bridging Europe&amp;rsquo;s Digital Divide: Macro-Digital Preconditions for Sustainable LLM Adoption in Retail</dc:title>
			<dc:creator>Mieta Bobanović Dasko</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020026</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-04</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/informatics13020026</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/25">

	<title>Informatics, Vol. 13, Pages 25: Virtualizing of Team Processes and Team Performance</title>
	<link>https://www.mdpi.com/2227-9709/13/2/25</link>
	<description>This study explores the virtualizability of team processes and their implications for team performance during the COVID-19 pandemic. The main research question was: What is the effect of the ease of virtualizing team processes on the outcomes of teams that have shifted from in-person to virtual work? A survey method was employed, and the data were analyzed using Structural Equation Modeling (SEM). Building on the frameworks based on literature review, the study defined sensory, relational, and synchronization requirements, along with the mechanisms of reach and representation. Results show that sensory requirements negatively influence the virtualizability of team processes, while relational and synchronization requirements do not have a statistically significant impact. Although the mechanisms of reach and representation do not moderate the relationships between constructs, they do have a direct positive effect on susceptibility to virtualization. Contrary to initial expectations, virtualizability positively affects both tangible and emotional outcomes, indicating that cohesion and satisfaction can be maintained&amp;mdash;or even improved&amp;mdash;in virtual teams. These findings enhance the theoretical understanding of team processes and virtualizability and offer practical insights for managing distributed teams.</description>
	<pubDate>2026-02-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 25: Virtualizing of Team Processes and Team Performance</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/25">doi: 10.3390/informatics13020025</a></p>
	<p>Authors:
		Henrique Takashi Adati Tomomitsu
		Renato de Oliveira Moraes
		</p>
	<p>This study explores the virtualizability of team processes and their implications for team performance during the COVID-19 pandemic. The main research question was: What is the effect of the ease of virtualizing team processes on the outcomes of teams that have shifted from in-person to virtual work? A survey method was employed, and the data were analyzed using Structural Equation Modeling (SEM). Building on the frameworks based on literature review, the study defined sensory, relational, and synchronization requirements, along with the mechanisms of reach and representation. Results show that sensory requirements negatively influence the virtualizability of team processes, while relational and synchronization requirements do not have a statistically significant impact. Although the mechanisms of reach and representation do not moderate the relationships between constructs, they do have a direct positive effect on susceptibility to virtualization. Contrary to initial expectations, virtualizability positively affects both tangible and emotional outcomes, indicating that cohesion and satisfaction can be maintained&amp;mdash;or even improved&amp;mdash;in virtual teams. These findings enhance the theoretical understanding of team processes and virtualizability and offer practical insights for managing distributed teams.</p>
	]]></content:encoded>

	<dc:title>Virtualizing of Team Processes and Team Performance</dc:title>
			<dc:creator>Henrique Takashi Adati Tomomitsu</dc:creator>
			<dc:creator>Renato de Oliveira Moraes</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020025</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-02-03</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-02-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/informatics13020025</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/24">

	<title>Informatics, Vol. 13, Pages 24: Exploring Scientific Literature Using Topic Modeling: A Practical Framework for Discovery and Classification</title>
	<link>https://www.mdpi.com/2227-9709/13/2/24</link>
	<description>The increasing volume and diversity of scientific publications poses challenges for scalable and interpretable topic discovery and automated document categorization. This study proposes an integrated framework that combines probabilistic topic modeling with supervised classification to support large-scale scientific literature analysis. Using 3689 abstracts from the Journal of Forensic Sciences (2009&amp;ndash;2022), Latent Dirichlet Allocation (LDA) is applied to uncover latent thematic structures, assess topic diagnosticity across forensic disciplines, and analyze temporal research trends. Bayesian model selection with repeated resampling identifies a stable topic resolution, with the number of topics T lying in the range 83&amp;ndash;88, yielding semantically coherent and discipline-aligned topics. The resulting document&amp;ndash;topic representations are then used for supervised abstract classification. Across multiple models and resampling scenarios, the strongest and most stable performance is achieved under a Grouped Category configuration. In particular, XGBoost attains an Accuracy of 0.754 and a Macro-averaged F1 score of 0.737 at T=88, with comparable results at neighboring topic counts, indicating robustness to topic granularity. Overall, the proposed framework provides a reproducible, interpretable, and computationally efficient pipeline for literature organization, trend analysis, and metadata enhancement in scientific domains.</description>
	<pubDate>2026-01-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 24: Exploring Scientific Literature Using Topic Modeling: A Practical Framework for Discovery and Classification</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/24">doi: 10.3390/informatics13020024</a></p>
	<p>Authors:
		Amir Alipour Yengejeh
		Larry Tang
		Candice M. Bridge
		Chandra Kundu
		</p>
	<p>The increasing volume and diversity of scientific publications poses challenges for scalable and interpretable topic discovery and automated document categorization. This study proposes an integrated framework that combines probabilistic topic modeling with supervised classification to support large-scale scientific literature analysis. Using 3689 abstracts from the Journal of Forensic Sciences (2009&amp;ndash;2022), Latent Dirichlet Allocation (LDA) is applied to uncover latent thematic structures, assess topic diagnosticity across forensic disciplines, and analyze temporal research trends. Bayesian model selection with repeated resampling identifies a stable topic resolution, with the number of topics T lying in the range 83&amp;ndash;88, yielding semantically coherent and discipline-aligned topics. The resulting document&amp;ndash;topic representations are then used for supervised abstract classification. Across multiple models and resampling scenarios, the strongest and most stable performance is achieved under a Grouped Category configuration. In particular, XGBoost attains an Accuracy of 0.754 and a Macro-averaged F1 score of 0.737 at T=88, with comparable results at neighboring topic counts, indicating robustness to topic granularity. Overall, the proposed framework provides a reproducible, interpretable, and computationally efficient pipeline for literature organization, trend analysis, and metadata enhancement in scientific domains.</p>
	]]></content:encoded>

	<dc:title>Exploring Scientific Literature Using Topic Modeling: A Practical Framework for Discovery and Classification</dc:title>
			<dc:creator>Amir Alipour Yengejeh</dc:creator>
			<dc:creator>Larry Tang</dc:creator>
			<dc:creator>Candice M. Bridge</dc:creator>
			<dc:creator>Chandra Kundu</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020024</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-30</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-30</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/informatics13020024</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/23">

	<title>Informatics, Vol. 13, Pages 23: Leveraging Informatics to Manage Lifelong Monitoring in Childhood Cancer Survivors</title>
	<link>https://www.mdpi.com/2227-9709/13/2/23</link>
	<description>Background: Electronic health records (EHR) have long held promise for sharing information efficiently, but this remains challenging. This quality improvement initiative sought to improve the accurate documentation of anthracycline and radiation therapy exposures in pediatric oncology patients who were treated at different institutions through a quality improvement methodology and EHR tools. Methods: A custom-built EHR smartform was previously created. Modifications were made to the smartform, and quality improvement methods were utilized to improve receipt of radiation summaries from other institutions and documentation of chemotherapeutic doses. Results: Three months after interventions, including clinician education and smartform updates, accurate anthracycline documentation improved from &amp;le;60% to 100%. At 12 months post-intervention, accurate anthracycline documentation remained &amp;gt; 90%. Documentation of radiation therapy improved similarly at 3 months post-intervention, with sustained improvement to 81% at 12 months post-intervention. Conclusions: Accurate documentation of radiation and chemotherapeutic exposures for pediatric oncology patients improved with education and changes to an EHR smartform. A central data location with quality assurance tools to ensure accuracy is one solution enabling accurate tracking of exposures and care plans for children with chronic illnesses.</description>
	<pubDate>2026-01-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 23: Leveraging Informatics to Manage Lifelong Monitoring in Childhood Cancer Survivors</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/23">doi: 10.3390/informatics13020023</a></p>
	<p>Authors:
		Kimberly Ann Davidow
		Renee Gresh
		E. Anders Kolb
		Ellen Guarnieri
		Mary R. Cooper
		</p>
	<p>Background: Electronic health records (EHR) have long held promise for sharing information efficiently, but this remains challenging. This quality improvement initiative sought to improve the accurate documentation of anthracycline and radiation therapy exposures in pediatric oncology patients who were treated at different institutions through a quality improvement methodology and EHR tools. Methods: A custom-built EHR smartform was previously created. Modifications were made to the smartform, and quality improvement methods were utilized to improve receipt of radiation summaries from other institutions and documentation of chemotherapeutic doses. Results: Three months after interventions, including clinician education and smartform updates, accurate anthracycline documentation improved from &amp;le;60% to 100%. At 12 months post-intervention, accurate anthracycline documentation remained &amp;gt; 90%. Documentation of radiation therapy improved similarly at 3 months post-intervention, with sustained improvement to 81% at 12 months post-intervention. Conclusions: Accurate documentation of radiation and chemotherapeutic exposures for pediatric oncology patients improved with education and changes to an EHR smartform. A central data location with quality assurance tools to ensure accuracy is one solution enabling accurate tracking of exposures and care plans for children with chronic illnesses.</p>
	]]></content:encoded>

	<dc:title>Leveraging Informatics to Manage Lifelong Monitoring in Childhood Cancer Survivors</dc:title>
			<dc:creator>Kimberly Ann Davidow</dc:creator>
			<dc:creator>Renee Gresh</dc:creator>
			<dc:creator>E. Anders Kolb</dc:creator>
			<dc:creator>Ellen Guarnieri</dc:creator>
			<dc:creator>Mary R. Cooper</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020023</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-29</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-29</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Brief Report</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/informatics13020023</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/21">

	<title>Informatics, Vol. 13, Pages 21: A Highly Robust Approach to NFC Authentication for Privacy-Sensitive Mobile Payment Services</title>
	<link>https://www.mdpi.com/2227-9709/13/2/21</link>
	<description>The rapid growth of mobile payment systems has positioned Near Field Communication (NFC) as a core enabling technology. However, conventional NFC protocols primarily emphasize transmission efficiency rather than robust authentication and privacy protection, which exposes users to threats such as eavesdropping, replay, and tracking attacks. In this study, a lightweight and privacy-preserving authentication protocol is proposed for NFC-based mobile payment services. The protocol integrates anonymous authentication, replay resistance, and tracking protection while maintaining low computational overhead suitable for resource-constrained devices. A secure offline session key generation mechanism is incorporated to enhance transaction reliability without increasing system complexity. Formal security verification using the Scyther tool (version 1.1.3) confirms resistance against major attack vectors, including impersonation, man-in-the-middle, and replay attacks. Comparative performance analysis further demonstrates that the proposed scheme achieves superior efficiency and stronger security guarantees compared with existing approaches. These results indicate that the protocol provides a practical and scalable solution for secure and privacy-aware NFC mobile payment environments.</description>
	<pubDate>2026-01-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 21: A Highly Robust Approach to NFC Authentication for Privacy-Sensitive Mobile Payment Services</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/21">doi: 10.3390/informatics13020021</a></p>
	<p>Authors:
		Rerkchai Fooprateepsiri
		U-Koj Plangprasopchoke
		</p>
	<p>The rapid growth of mobile payment systems has positioned Near Field Communication (NFC) as a core enabling technology. However, conventional NFC protocols primarily emphasize transmission efficiency rather than robust authentication and privacy protection, which exposes users to threats such as eavesdropping, replay, and tracking attacks. In this study, a lightweight and privacy-preserving authentication protocol is proposed for NFC-based mobile payment services. The protocol integrates anonymous authentication, replay resistance, and tracking protection while maintaining low computational overhead suitable for resource-constrained devices. A secure offline session key generation mechanism is incorporated to enhance transaction reliability without increasing system complexity. Formal security verification using the Scyther tool (version 1.1.3) confirms resistance against major attack vectors, including impersonation, man-in-the-middle, and replay attacks. Comparative performance analysis further demonstrates that the proposed scheme achieves superior efficiency and stronger security guarantees compared with existing approaches. These results indicate that the protocol provides a practical and scalable solution for secure and privacy-aware NFC mobile payment environments.</p>
	]]></content:encoded>

	<dc:title>A Highly Robust Approach to NFC Authentication for Privacy-Sensitive Mobile Payment Services</dc:title>
			<dc:creator>Rerkchai Fooprateepsiri</dc:creator>
			<dc:creator>U-Koj Plangprasopchoke</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020021</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-28</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/informatics13020021</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/22">

	<title>Informatics, Vol. 13, Pages 22: Generative AI in Developing Countries: Adoption Dynamics in Vietnamese Local Government</title>
	<link>https://www.mdpi.com/2227-9709/13/2/22</link>
	<description>Generative Artificial Intelligence (GenAI) is rapidly reshaping public-sector operations, yet its adoption in developing countries remains poorly understood. Existing research focuses largely on traditional AI in developed contexts, leaving unanswered questions about how GenAI interacts with institutional, organizational, and governance constraints in resource-limited settings. This study examines the organizational factors shaping GenAI adoption in Vietnamese local government using 25 semi-structured interviews analyzed through the Technology&amp;ndash;Organization&amp;ndash;Environment (TOE) framework. Findings reveal three central dynamics: (1) the emergence of informal, voluntary, and bottom-up experimentation with GenAI among civil servants; (2) significant institutional capacity constraints&amp;mdash;including absent strategies, limited budgets, weak integration, and inadequate training&amp;mdash;that prevent formal adoption; and (3) an &amp;ldquo;AI accountability vacuum&amp;rdquo; characterized by data security concerns, regulatory ambiguity, and unclear responsibility for AI-generated errors. Together, these factors create a state of governance paralysis in which GenAI is simultaneously encouraged and discouraged. The study contributes to theory by extending the TOE framework with an environment-specific construct&amp;mdash;the AI accountability vacuum&amp;mdash;and by reframing resistance as a rational response to structural gaps rather than technophobia. Practical implications highlight the need for capacity-building, regulatory guidance, accountable governance structures, and leadership-driven institutional support to enable safe and effective GenAI adoption in developing-country public sectors.</description>
	<pubDate>2026-01-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 22: Generative AI in Developing Countries: Adoption Dynamics in Vietnamese Local Government</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/22">doi: 10.3390/informatics13020022</a></p>
	<p>Authors:
		Phu Nguyen Duy
		Charles Ruangthamsing
		Peerasit Kamnuansilpa
		Grichawat Lowatcharin
		Prasongchai Setthasuravich
		</p>
	<p>Generative Artificial Intelligence (GenAI) is rapidly reshaping public-sector operations, yet its adoption in developing countries remains poorly understood. Existing research focuses largely on traditional AI in developed contexts, leaving unanswered questions about how GenAI interacts with institutional, organizational, and governance constraints in resource-limited settings. This study examines the organizational factors shaping GenAI adoption in Vietnamese local government using 25 semi-structured interviews analyzed through the Technology&amp;ndash;Organization&amp;ndash;Environment (TOE) framework. Findings reveal three central dynamics: (1) the emergence of informal, voluntary, and bottom-up experimentation with GenAI among civil servants; (2) significant institutional capacity constraints&amp;mdash;including absent strategies, limited budgets, weak integration, and inadequate training&amp;mdash;that prevent formal adoption; and (3) an &amp;ldquo;AI accountability vacuum&amp;rdquo; characterized by data security concerns, regulatory ambiguity, and unclear responsibility for AI-generated errors. Together, these factors create a state of governance paralysis in which GenAI is simultaneously encouraged and discouraged. The study contributes to theory by extending the TOE framework with an environment-specific construct&amp;mdash;the AI accountability vacuum&amp;mdash;and by reframing resistance as a rational response to structural gaps rather than technophobia. Practical implications highlight the need for capacity-building, regulatory guidance, accountable governance structures, and leadership-driven institutional support to enable safe and effective GenAI adoption in developing-country public sectors.</p>
	]]></content:encoded>

	<dc:title>Generative AI in Developing Countries: Adoption Dynamics in Vietnamese Local Government</dc:title>
			<dc:creator>Phu Nguyen Duy</dc:creator>
			<dc:creator>Charles Ruangthamsing</dc:creator>
			<dc:creator>Peerasit Kamnuansilpa</dc:creator>
			<dc:creator>Grichawat Lowatcharin</dc:creator>
			<dc:creator>Prasongchai Setthasuravich</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020022</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-28</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/informatics13020022</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/20">

	<title>Informatics, Vol. 13, Pages 20: AI-Enhanced Skill Assessment in Higher Vocational Education: A Systematic Review and Meta-Analysis</title>
	<link>https://www.mdpi.com/2227-9709/13/2/20</link>
	<description>This study synthesizes empirical evidence on AI-supported skill assessment systems in higher vocational education through a systematic review and meta-analysis. Despite growing interest in generative AI within higher education, empirical research on AI-enabled assessment remains fragmented and methodologically uneven, particularly in vocational contexts. Following PRISMA 2020 guidelines, 27 peer-reviewed empirical studies published between 2010 and 2024 were identified from major international and Chinese databases and included in the analysis. Using a random-effects model, the meta-analysis indicates a moderate positive association between AI-supported assessment systems and skill-related learning outcomes (Hedges&amp;amp;rsquo; g = 0.72), alongside substantial heterogeneity across study designs, outcome measures, and implementation contexts. Subgroup analyses suggest variation across regional and institutional settings, which should be interpreted cautiously given small sample sizes and diverse methodological approaches. Based on the synthesized evidence, the study proposes a conceptual AI-supported skill assessment framework that distinguishes empirically grounded components from forward-looking extensions related to generative AI. Rather than offering prescriptive solutions, the framework provides an evidence-informed baseline to support future research, system design, and responsible integration of generative AI in higher education assessment. Overall, the findings highlight both the potential and the current empirical limitations of AI-enabled assessment, underscoring the need for more robust, theory-informed, and transparent studies as generative AI applications continue to evolve.</description>
	<pubDate>2026-01-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 20: AI-Enhanced Skill Assessment in Higher Vocational Education: A Systematic Review and Meta-Analysis</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/20">doi: 10.3390/informatics13020020</a></p>
	<p>Authors:
		Xia Sun
		Haoheng Tian
		</p>
	<p>This study synthesizes empirical evidence on AI-supported skill assessment systems in higher vocational education through a systematic review and meta-analysis. Despite growing interest in generative AI within higher education, empirical research on AI-enabled assessment remains fragmented and methodologically uneven, particularly in vocational contexts. Following PRISMA 2020 guidelines, 27 peer-reviewed empirical studies published between 2010 and 2024 were identified from major international and Chinese databases and included in the analysis. Using a random-effects model, the meta-analysis indicates a moderate positive association between AI-supported assessment systems and skill-related learning outcomes (Hedges&amp;amp;rsquo; g = 0.72), alongside substantial heterogeneity across study designs, outcome measures, and implementation contexts. Subgroup analyses suggest variation across regional and institutional settings, which should be interpreted cautiously given small sample sizes and diverse methodological approaches. Based on the synthesized evidence, the study proposes a conceptual AI-supported skill assessment framework that distinguishes empirically grounded components from forward-looking extensions related to generative AI. Rather than offering prescriptive solutions, the framework provides an evidence-informed baseline to support future research, system design, and responsible integration of generative AI in higher education assessment. Overall, the findings highlight both the potential and the current empirical limitations of AI-enabled assessment, underscoring the need for more robust, theory-informed, and transparent studies as generative AI applications continue to evolve.</p>
	]]></content:encoded>

	<dc:title>AI-Enhanced Skill Assessment in Higher Vocational Education: A Systematic Review and Meta-Analysis</dc:title>
			<dc:creator>Xia Sun</dc:creator>
			<dc:creator>Haoheng Tian</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020020</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-28</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/informatics13020020</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/19">

	<title>Informatics, Vol. 13, Pages 19: An AIoT-Based Framework for Automated English-Speaking Assessment: Architecture, Benchmarking, and Reliability Analysis of Open-Source ASR</title>
	<link>https://www.mdpi.com/2227-9709/13/2/19</link>
	<description>The emergence of low-cost edge devices has enabled the integration of automatic speech recognition (ASR) into IoT environments, creating new opportunities for real-time language assessment. However, achieving reliable performance on resource-constrained hardware remains a significant challenge, especially on the Artificial Internet of Things (AIoT). This study presents an AIoT-based framework for automated English-speaking assessment that integrates architecture and system design, ASR benchmarking, and reliability analysis on edge devices. The proposed AIoT-oriented architecture incorporates a lightweight scoring framework capable of analyzing pronunciation, fluency, prosody, and CEFR-aligned speaking proficiency within an automated assessment system. Seven open-source ASR models&amp;amp;mdash;four Whisper variants (tiny, base, small, and medium) and three Vosk models&amp;amp;mdash;were systematically benchmarked in terms of recognition accuracy, inference latency, and computational efficiency. Experimental results indicate that Whisper-medium deployed on the Raspberry Pi 5 achieved the strongest overall performance, reducing inference latency by 42&amp;amp;ndash;48% compared with the Raspberry Pi 4 and attaining the lowest Word Error Rate (WER) of 6.8%. In contrast, smaller models such as Whisper-tiny, with a WER of 26.7%, exhibited two- to threefold higher scoring variability, demonstrating how recognition errors propagate into automated assessment reliability. System-level testing revealed that the Raspberry Pi 5 can sustain near real-time processing with approximately 58% CPU utilization and around 1.2 GB of memory, whereas the Raspberry Pi 4 frequently approaches practical operational limits under comparable workloads. Validation using real learner speech data (approximately 100 sessions) confirmed that the proposed system delivers accurate, portable, and privacy-preserving speaking assessment using low-power edge hardware. 
Overall, this work introduces a practical AIoT-based assessment framework, provides a comprehensive benchmark of open-source ASR models on edge platforms, and offers empirical insights into the trade-offs among recognition accuracy, inference latency, and scoring stability in edge-based ASR deployments.</description>
	<pubDate>2026-01-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 19: An AIoT-Based Framework for Automated English-Speaking Assessment: Architecture, Benchmarking, and Reliability Analysis of Open-Source ASR</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/19">doi: 10.3390/informatics13020019</a></p>
	<p>Authors:
		Paniti Netinant
		Rerkchai Fooprateepsiri
		Ajjima Rukhiran
		Meennapa Rukhiran
		</p>
	<p>The emergence of low-cost edge devices has enabled the integration of automatic speech recognition (ASR) into IoT environments, creating new opportunities for real-time language assessment. However, achieving reliable performance on resource-constrained hardware remains a significant challenge, especially on the Artificial Internet of Things (AIoT). This study presents an AIoT-based framework for automated English-speaking assessment that integrates architecture and system design, ASR benchmarking, and reliability analysis on edge devices. The proposed AIoT-oriented architecture incorporates a lightweight scoring framework capable of analyzing pronunciation, fluency, prosody, and CEFR-aligned speaking proficiency within an automated assessment system. Seven open-source ASR models&amp;amp;mdash;four Whisper variants (tiny, base, small, and medium) and three Vosk models&amp;amp;mdash;were systematically benchmarked in terms of recognition accuracy, inference latency, and computational efficiency. Experimental results indicate that Whisper-medium deployed on the Raspberry Pi 5 achieved the strongest overall performance, reducing inference latency by 42&amp;amp;ndash;48% compared with the Raspberry Pi 4 and attaining the lowest Word Error Rate (WER) of 6.8%. In contrast, smaller models such as Whisper-tiny, with a WER of 26.7%, exhibited two- to threefold higher scoring variability, demonstrating how recognition errors propagate into automated assessment reliability. System-level testing revealed that the Raspberry Pi 5 can sustain near real-time processing with approximately 58% CPU utilization and around 1.2 GB of memory, whereas the Raspberry Pi 4 frequently approaches practical operational limits under comparable workloads. Validation using real learner speech data (approximately 100 sessions) confirmed that the proposed system delivers accurate, portable, and privacy-preserving speaking assessment using low-power edge hardware. 
Overall, this work introduces a practical AIoT-based assessment framework, provides a comprehensive benchmark of open-source ASR models on edge platforms, and offers empirical insights into the trade-offs among recognition accuracy, inference latency, and scoring stability in edge-based ASR deployments.</p>
	]]></content:encoded>

	<dc:title>An AIoT-Based Framework for Automated English-Speaking Assessment: Architecture, Benchmarking, and Reliability Analysis of Open-Source ASR</dc:title>
			<dc:creator>Paniti Netinant</dc:creator>
			<dc:creator>Rerkchai Fooprateepsiri</dc:creator>
			<dc:creator>Ajjima Rukhiran</dc:creator>
			<dc:creator>Meennapa Rukhiran</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020019</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-26</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-26</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/informatics13020019</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/2/18">

	<title>Informatics, Vol. 13, Pages 18: Investigating the Impact of Education 4.0 and Digital Learning on Students&amp;rsquo; Learning Outcomes in Engineering: A Four-Year Multiple-Case Study</title>
	<link>https://www.mdpi.com/2227-9709/13/2/18</link>
	<description>Education 4.0 and digital learning have led to a technology-driven transformation in educational methodologies and the roles of teachers, primarily at Higher Education Institutions (HEIs). From an educational standpoint, the extant literature on Education 4.0 highlights its technological features and benefits; however, there is a lack of studies that assess its impact on students&amp;amp;rsquo; learning outcomes. Seemingly, Education 4.0 features are taken for granted, as if the technology in itself were enough to guarantee students&amp;amp;rsquo; learning, self-efficacy, and engagement. Seeking to address this lack, this study describes the implications of tailoring Education 4.0 tenets and digital learning in an engineering curriculum. Four case studies conducted in the last four years with 119 students are presented, in which technologies such as digital twins, a Modular Production System (MPS), low-cost robotics, 3D printing, generative AI, machine learning, and mobile learning were integrated. With these case studies, an educational methodology with active learning, hands-on activities, and continuous teacher support was designed and deployed to foster cognitive and affective learning outcomes. A mixed-methods study was conducted, utilizing students&amp;amp;rsquo; grades, surveys, and semi-structured interviews to assess the approach&amp;amp;rsquo;s impact. The outcomes suggest that including Education 4.0 tenets and digital learning can enhance discipline-based skills, creativity, self-efficacy, collaboration, and self-directed learning. These results were obtained not only via the technological features but also through the incorporation of reflective teaching that provided several educational resources and oriented the methodology for students&amp;amp;rsquo; learning and engagement. 
The results of this study can help complement the concept of Education 4.0, helping to find a student-centered approach and conceiving a balance between technology, teaching practices, and cognitive and affective learning outcomes.</description>
	<pubDate>2026-01-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 18: Investigating the Impact of Education 4.0 and Digital Learning on Students&amp;rsquo; Learning Outcomes in Engineering: A Four-Year Multiple-Case Study</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/2/18">doi: 10.3390/informatics13020018</a></p>
	<p>Authors:
		Jonathan Álvarez Ariza
		Carola Hernández Hernández
		</p>
	<p>Education 4.0 and digital learning have led to a technology-driven transformation in educational methodologies and the roles of teachers, primarily at Higher Education Institutions (HEIs). From an educational standpoint, the extant literature on Education 4.0 highlights its technological features and benefits; however, there is a lack of studies that assess its impact on students&amp;amp;rsquo; learning outcomes. Seemingly, Education 4.0 features are taken for granted, as if the technology in itself were enough to guarantee students&amp;amp;rsquo; learning, self-efficacy, and engagement. Seeking to address this lack, this study describes the implications of tailoring Education 4.0 tenets and digital learning in an engineering curriculum. Four case studies conducted in the last four years with 119 students are presented, in which technologies such as digital twins, a Modular Production System (MPS), low-cost robotics, 3D printing, generative AI, machine learning, and mobile learning were integrated. With these case studies, an educational methodology with active learning, hands-on activities, and continuous teacher support was designed and deployed to foster cognitive and affective learning outcomes. A mixed-methods study was conducted, utilizing students&amp;amp;rsquo; grades, surveys, and semi-structured interviews to assess the approach&amp;amp;rsquo;s impact. The outcomes suggest that including Education 4.0 tenets and digital learning can enhance discipline-based skills, creativity, self-efficacy, collaboration, and self-directed learning. These results were obtained not only via the technological features but also through the incorporation of reflective teaching that provided several educational resources and oriented the methodology for students&amp;amp;rsquo; learning and engagement. 
The results of this study can help complement the concept of Education 4.0, helping to find a student-centered approach and conceiving a balance between technology, teaching practices, and cognitive and affective learning outcomes.</p>
	]]></content:encoded>

	<dc:title>Investigating the Impact of Education 4.0 and Digital Learning on Students&amp;rsquo; Learning Outcomes in Engineering: A Four-Year Multiple-Case Study</dc:title>
			<dc:creator>Jonathan Álvarez Ariza</dc:creator>
			<dc:creator>Carola Hernández Hernández</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13020018</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-23</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-23</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/informatics13020018</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/2/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/17">

	<title>Informatics, Vol. 13, Pages 17: Digital Skills and Employer Transparency: Two Key Drivers Reinforcing Positive AI Attitudes and Perception Among Europeans</title>
	<link>https://www.mdpi.com/2227-9709/13/1/17</link>
	<description>Using 2024 Eurobarometer survey data from 26,415 workers in 27 EU countries, this study examines how digital skills and employer transparency shape workers&amp;amp;rsquo; attitudes toward and perception of artificial intelligence (AI). Drawing on information systems and behavioral theories, regression analyses reveal that digital skills strongly predict augmentation-dominant attitude. Workers with higher digital skills view AI as complementary rather than threatening, with an augmentation attitude mediating 56% of the skills&amp;amp;ndash;perception relationship. Adjacently, employer transparency attenuates the translation of replacement attitude into a negative perception of AI in the workplace. Organizations and policymakers should prioritize digital upskilling and ensure workplace AI transparency requirements to foster a positive attitude and perception, recognizing that skills development and organizational communication are equally vital for the successful integration of AI in the workplace.</description>
	<pubDate>2026-01-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 17: Digital Skills and Employer Transparency: Two Key Drivers Reinforcing Positive AI Attitudes and Perception Among Europeans</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/17">doi: 10.3390/informatics13010017</a></p>
	<p>Authors:
		Dharan Bharti
		Cristian Balducci
		Salvatore Zappalà
		</p>
	<p>Using 2024 Eurobarometer survey data from 26,415 workers in 27 EU countries, this study examines how digital skills and employer transparency shape workers&amp;amp;rsquo; attitudes toward and perception of artificial intelligence (AI). Drawing on information systems and behavioral theories, regression analyses reveal that digital skills strongly predict augmentation-dominant attitude. Workers with higher digital skills view AI as complementary rather than threatening, with an augmentation attitude mediating 56% of the skills&amp;amp;ndash;perception relationship. Adjacently, employer transparency attenuates the translation of replacement attitude into a negative perception of AI in the workplace. Organizations and policymakers should prioritize digital upskilling and ensure workplace AI transparency requirements to foster a positive attitude and perception, recognizing that skills development and organizational communication are equally vital for the successful integration of AI in the workplace.</p>
	]]></content:encoded>

	<dc:title>Digital Skills and Employer Transparency: Two Key Drivers Reinforcing Positive AI Attitudes and Perception Among Europeans</dc:title>
			<dc:creator>Dharan Bharti</dc:creator>
			<dc:creator>Cristian Balducci</dc:creator>
			<dc:creator>Salvatore Zappalà</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010017</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-22</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-22</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/informatics13010017</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/16">

	<title>Informatics, Vol. 13, Pages 16: Design and Evaluation of a Generative AI-Enhanced Serious Game for Digital Literacy: An AI-Driven NPC Approach</title>
	<link>https://www.mdpi.com/2227-9709/13/1/16</link>
	<description>The rapid proliferation of misinformation on social media underscores the urgent need for scalable digital-literacy instruction. This study presents the design and evaluation of a Generative AI-enhanced serious game system that integrates Large Language Models (LLMs) to drive adaptive non-player characters (NPCs). Unlike traditional scripted interactions, the system employs role-based prompt engineering to align real-time AI dialogue with the Currency, Relevance, Authority, Accuracy, and Purpose (CRAAP) framework, enabling dynamic scaffolding and authentic misinformation scenarios. A mixed-method experiment with 60 undergraduate students compared this AI-driven approach to traditional instruction using a 40-item digital-literacy pre/post test, the Intrinsic Motivation Inventory (IMI), and open-ended reflections. Results indicated that while both groups improved significantly, the game-based group achieved larger gains in credibility-evaluation performance and reported higher perceived competence, interest, and effort. Qualitative analysis highlighted the HCI trade-off between the high pedagogical value of adaptive AI guidance and technical constraints such as system latency. The findings demonstrate that Generative AI can be effectively operationalized as a dynamic interface layer in serious games to strengthen critical reasoning. This study provides practical guidelines for architecting AI-NPC interactions and advances the theoretical understanding of AI-supported educational informatics.</description>
	<pubDate>2026-01-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 16: Design and Evaluation of a Generative AI-Enhanced Serious Game for Digital Literacy: An AI-Driven NPC Approach</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/16">doi: 10.3390/informatics13010016</a></p>
	<p>Authors:
		Suepphong Chernbumroong
		Kannikar Intawong
		Udomchoke Asawimalkit
		Kitti Puritat
		Phichete Julrode
		</p>
	<p>The rapid proliferation of misinformation on social media underscores the urgent need for scalable digital-literacy instruction. This study presents the design and evaluation of a Generative AI-enhanced serious game system that integrates Large Language Models (LLMs) to drive adaptive non-player characters (NPCs). Unlike traditional scripted interactions, the system employs role-based prompt engineering to align real-time AI dialogue with the Currency, Relevance, Authority, Accuracy, and Purpose (CRAAP) framework, enabling dynamic scaffolding and authentic misinformation scenarios. A mixed-method experiment with 60 undergraduate students compared this AI-driven approach to traditional instruction using a 40-item digital-literacy pre/post test, the Intrinsic Motivation Inventory (IMI), and open-ended reflections. Results indicated that while both groups improved significantly, the game-based group achieved larger gains in credibility-evaluation performance and reported higher perceived competence, interest, and effort. Qualitative analysis highlighted the HCI trade-off between the high pedagogical value of adaptive AI guidance and technical constraints such as system latency. The findings demonstrate that Generative AI can be effectively operationalized as a dynamic interface layer in serious games to strengthen critical reasoning. This study provides practical guidelines for architecting AI-NPC interactions and advances the theoretical understanding of AI-supported educational informatics.</p>
	]]></content:encoded>

	<dc:title>Design and Evaluation of a Generative AI-Enhanced Serious Game for Digital Literacy: An AI-Driven NPC Approach</dc:title>
			<dc:creator>Suepphong Chernbumroong</dc:creator>
			<dc:creator>Kannikar Intawong</dc:creator>
			<dc:creator>Udomchoke Asawimalkit</dc:creator>
			<dc:creator>Kitti Puritat</dc:creator>
			<dc:creator>Phichete Julrode</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010016</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-21</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-21</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/informatics13010016</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/15">

	<title>Informatics, Vol. 13, Pages 15: Sensor-Drift Compensation in Electronic-Nose-Based Gas Recognition Using Knowledge Distillation</title>
	<link>https://www.mdpi.com/2227-9709/13/1/15</link>
	<description>Environmental changes and sensor aging can cause sensor drift in sensor array responses (i.e., a shift in the measured signal/feature distribution over time), which in turn degrades gas classification performance in real-world deployments of electronic-nose systems. Previous studies using the UCI Gas Sensor Array Drift Dataset as a benchmark reported promising drift compensation results but often lacked robust statistical validation and may overcompensate for drift by suppressing class-discriminative variance. To address these limitations and rigorously evaluate improvements in sensor-drift compensation, we designed two domain adaptation tasks based on the UCI electronic-nose dataset: (1) using the first batch to predict remaining batches, simulating a controlled laboratory setting, and (2) using Batches 1 through n&amp;amp;minus;1 to predict Batch n, simulating continuous training data updates for online training. Then, we systematically tested three methods&amp;amp;mdash;our semi-supervised knowledge distillation method (KD) for sensor-drift compensation; a previously benchmarked method, Domain-Regularized Component Analysis (DRCA); and a hybrid method, KD&amp;amp;ndash;DRCA&amp;amp;mdash;across 30 random test-set partitions on the UCI dataset. We showed that semi-supervised KD consistently outperformed both DRCA and KD&amp;amp;ndash;DRCA, achieving up to 18% and 15% relative improvements in accuracy and F1-score, respectively, over the baseline, proving KD&amp;amp;rsquo;s superior effectiveness in electronic-nose drift compensation. This work provides a rigorous statistical validation of KD for electronic-nose drift compensation under long-term temporal drift, with repeated randomized evaluation and significance testing, and demonstrates consistent improvements over DRCA on the UCI drift benchmark.</description>
	<pubDate>2026-01-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 15: Sensor-Drift Compensation in Electronic-Nose-Based Gas Recognition Using Knowledge Distillation</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/15">doi: 10.3390/informatics13010015</a></p>
	<p>Authors:
		Juntao Lin
		Xianghao Zhan
		</p>
	<p>Environmental changes and sensor aging can cause sensor drift in sensor array responses (i.e., a shift in the measured signal/feature distribution over time), which in turn degrades gas classification performance in real-world deployments of electronic-nose systems. Previous studies using the UCI Gas Sensor Array Drift Dataset as a benchmark reported promising drift compensation results but often lacked robust statistical validation and may overcompensate for drift by suppressing class-discriminative variance. To address these limitations and rigorously evaluate improvements in sensor-drift compensation, we designed two domain adaptation tasks based on the UCI electronic-nose dataset: (1) using the first batch to predict remaining batches, simulating a controlled laboratory setting, and (2) using Batches 1 through n&amp;amp;minus;1 to predict Batch n, simulating continuous training data updates for online training. Then, we systematically tested three methods&amp;amp;mdash;our semi-supervised knowledge distillation method (KD) for sensor-drift compensation; a previously benchmarked method, Domain-Regularized Component Analysis (DRCA); and a hybrid method, KD&amp;amp;ndash;DRCA&amp;amp;mdash;across 30 random test-set partitions on the UCI dataset. We showed that semi-supervised KD consistently outperformed both DRCA and KD&amp;amp;ndash;DRCA, achieving up to 18% and 15% relative improvements in accuracy and F1-score, respectively, over the baseline, proving KD&amp;amp;rsquo;s superior effectiveness in electronic-nose drift compensation. This work provides a rigorous statistical validation of KD for electronic-nose drift compensation under long-term temporal drift, with repeated randomized evaluation and significance testing, and demonstrates consistent improvements over DRCA on the UCI drift benchmark.</p>
	]]></content:encoded>

	<dc:title>Sensor-Drift Compensation in Electronic-Nose-Based Gas Recognition Using Knowledge Distillation</dc:title>
			<dc:creator>Juntao Lin</dc:creator>
			<dc:creator>Xianghao Zhan</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010015</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-20</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/informatics13010015</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/14">

	<title>Informatics, Vol. 13, Pages 14: The Validation&amp;ndash;Deployment Gap in Agricultural Information Systems: A Systematic Technology Readiness Assessment</title>
	<link>https://www.mdpi.com/2227-9709/13/1/14</link>
	<description>Agricultural marketing increasingly integrates Agriculture 4.0 technologies&amp;amp;mdash;Blockchain, AI/ML, IoT, and recommendation systems&amp;amp;mdash;yet systematic evaluations of computational maturity and deployment readiness remain limited. This Systematic Literature Review (SLR) examined 99 peer-reviewed studies (2019&amp;amp;ndash;2025) from Scopus, Web of Science, and IEEE Xplore following PRISMA protocols to assess algorithmic performance, evaluation methods, and Technology Readiness Levels (TRLs) for agricultural marketing applications. Hybrid recommendation systems dominate current research (28.3%), achieving accuracies of 80&amp;amp;ndash;92%, while blockchain implementations (15.2%) show fast transaction times (&amp;amp;lt;2 s) but limited real-world adoption. Machine learning models using Random Forest, Gradient Boosting, and CNNs reach 85&amp;amp;ndash;95% predictive accuracy, and IoT systems report &amp;amp;gt;95% data transmission reliability. However, 77.8% of technologies remain at validation stages (TRL &amp;amp;le; 5), and only 3% demonstrate operational deployment beyond one year. The findings reveal an &amp;amp;ldquo;efficiency paradox&amp;amp;rdquo;: strong technical performance (75&amp;amp;ndash;97/100) contrasts with weak economic validation (&amp;amp;le;20% include cost&amp;amp;ndash;benefit analysis). Most studies overlook temporal, geographic, and economic generalization, prioritizing computational metrics over implementation viability. This review highlights the persistent validation&amp;amp;ndash;deployment gap in digital agriculture, urging a shift toward multi-tier evaluation frameworks that include contextual, adoption, and impact validation under real deployment conditions.</description>
	<pubDate>2026-01-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 14: The Validation&amp;ndash;Deployment Gap in Agricultural Information Systems: A Systematic Technology Readiness Assessment</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/14">doi: 10.3390/informatics13010014</a></p>
	<p>Authors:
		Mary Elsy Arzuaga-Ochoa
		Melisa Acosta-Coll
		Mauricio Barrios Barrios
		</p>
	<p>Agricultural marketing increasingly integrates Agriculture 4.0 technologies&amp;mdash;Blockchain, AI/ML, IoT, and recommendation systems&amp;mdash;yet systematic evaluations of computational maturity and deployment readiness remain limited. This Systematic Literature Review (SLR) examined 99 peer-reviewed studies (2019&amp;ndash;2025) from Scopus, Web of Science, and IEEE Xplore following PRISMA protocols to assess algorithmic performance, evaluation methods, and Technology Readiness Levels (TRLs) for agricultural marketing applications. Hybrid recommendation systems dominate current research (28.3%), achieving accuracies of 80&amp;ndash;92%, while blockchain implementations (15.2%) show fast transaction times (&amp;lt;2 s) but limited real-world adoption. Machine learning models using Random Forest, Gradient Boosting, and CNNs reach 85&amp;ndash;95% predictive accuracy, and IoT systems report &amp;gt;95% data transmission reliability. However, 77.8% of technologies remain at validation stages (TRL &amp;le; 5), and only 3% demonstrate operational deployment beyond one year. The findings reveal an &amp;ldquo;efficiency paradox&amp;rdquo;: strong technical performance (75&amp;ndash;97/100) contrasts with weak economic validation (&amp;le;20% include cost&amp;ndash;benefit analysis). Most studies overlook temporal, geographic, and economic generalization, prioritizing computational metrics over implementation viability. This review highlights the persistent validation&amp;ndash;deployment gap in digital agriculture, urging a shift toward multi-tier evaluation frameworks that include contextual, adoption, and impact validation under real deployment conditions.</p>
	]]></content:encoded>

	<dc:title>The Validation&amp;ndash;Deployment Gap in Agricultural Information Systems: A Systematic Technology Readiness Assessment</dc:title>
			<dc:creator>Mary Elsy Arzuaga-Ochoa</dc:creator>
			<dc:creator>Melisa Acosta-Coll</dc:creator>
			<dc:creator>Mauricio Barrios Barrios</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010014</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-19</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/informatics13010014</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/13">

	<title>Informatics, Vol. 13, Pages 13: New Concept of Digital Learning Space for Health Professional Students: Quantitative Research Analysis on Perceptions</title>
	<link>https://www.mdpi.com/2227-9709/13/1/13</link>
	<description>The Immersive Decentralized Digital space (IDDs), derived from blockchain technology and Massively Multiplayer Online Games (MMOGs), enables real-time multisensory interactions that support social connection under metaverse concepts. Although recognized as a technology with significant potential for educational innovation, IDDs remain underutilized in health professions education. Health profession students are often unaware of how IDDs&amp;rsquo; features can be applied to their learning through in- or after-classroom activities. This study employs a quantitative research design to evaluate students&amp;rsquo; perceptions of next-generation digital learning without any prior exposure to IDDs. An electronic survey was developed to examine four dimensions of learning facilitation: &amp;ldquo;Remote Learning&amp;rdquo; for capturing past experiences with digital competence during the COVID-19 era; &amp;ldquo;Digital Evolution,&amp;rdquo; reflecting preferences in utilizing digital spaces; &amp;ldquo;Interactive Communication&amp;rdquo; and &amp;ldquo;Knowledge Application&amp;rdquo; for applicability of IDDs in the health professions education. Statistical analyses revealed no significant differences in perceptions based on gender or major on all factors. Nevertheless, significant differences emerged based on nationality in &amp;ldquo;Digital Evolution&amp;rdquo;, &amp;ldquo;Interactive Communication&amp;rdquo;, and &amp;ldquo;Knowledge Application&amp;rdquo;, highlighting the influence of cultural and educational backgrounds on receptiveness to virtual learning environments. By recognizing the discrepancies and addressing barriers to digital inclusion, IDDs hold strong potential to enhance health professional learning experiences and educational outcomes.</description>
	<pubDate>2026-01-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 13: New Concept of Digital Learning Space for Health Professional Students: Quantitative Research Analysis on Perceptions</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/13">doi: 10.3390/informatics13010013</a></p>
	<p>Authors:
		Joshua Mincheol Kim
		Provides Tsing Yin Ng
		Netaniah Kisha Pinto
		Kenneth Chung Hin Lai
		Evan Yu Tseng Wu
		Olivia Miu Yung Ngan
		Charis Yuk Man Li
		Florence Mei Kuen Tang
		</p>
	<p>The Immersive Decentralized Digital space (IDDs), derived from blockchain technology and Massively Multiplayer Online Games (MMOGs), enables real-time multisensory interactions that support social connection under metaverse concepts. Although recognized as a technology with significant potential for educational innovation, IDDs remain underutilized in health professions education. Health profession students are often unaware of how IDDs&amp;rsquo; features can be applied to their learning through in- or after-classroom activities. This study employs a quantitative research design to evaluate students&amp;rsquo; perceptions of next-generation digital learning without any prior exposure to IDDs. An electronic survey was developed to examine four dimensions of learning facilitation: &amp;ldquo;Remote Learning&amp;rdquo; for capturing past experiences with digital competence during the COVID-19 era; &amp;ldquo;Digital Evolution,&amp;rdquo; reflecting preferences in utilizing digital spaces; &amp;ldquo;Interactive Communication&amp;rdquo; and &amp;ldquo;Knowledge Application&amp;rdquo; for applicability of IDDs in the health professions education. Statistical analyses revealed no significant differences in perceptions based on gender or major on all factors. Nevertheless, significant differences emerged based on nationality in &amp;ldquo;Digital Evolution&amp;rdquo;, &amp;ldquo;Interactive Communication&amp;rdquo;, and &amp;ldquo;Knowledge Application&amp;rdquo;, highlighting the influence of cultural and educational backgrounds on receptiveness to virtual learning environments. By recognizing the discrepancies and addressing barriers to digital inclusion, IDDs hold strong potential to enhance health professional learning experiences and educational outcomes.</p>
	]]></content:encoded>

	<dc:title>New Concept of Digital Learning Space for Health Professional Students: Quantitative Research Analysis on Perceptions</dc:title>
			<dc:creator>Joshua Mincheol Kim</dc:creator>
			<dc:creator>Provides Tsing Yin Ng</dc:creator>
			<dc:creator>Netaniah Kisha Pinto</dc:creator>
			<dc:creator>Kenneth Chung Hin Lai</dc:creator>
			<dc:creator>Evan Yu Tseng Wu</dc:creator>
			<dc:creator>Olivia Miu Yung Ngan</dc:creator>
			<dc:creator>Charis Yuk Man Li</dc:creator>
			<dc:creator>Florence Mei Kuen Tang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010013</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-15</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-15</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/informatics13010013</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/12">

	<title>Informatics, Vol. 13, Pages 12: Can Location-Based Augmented Reality Support Cultural-Heritage Experience in Real-World Settings? Age-Related Engagement Patterns and a Field-Based Evaluation</title>
	<link>https://www.mdpi.com/2227-9709/13/1/12</link>
	<description>The Wua-Lai silvercraft community in Chiang Mai is experiencing a widening disconnect with younger visitors, raising concerns about the erosion of intangible cultural heritage. This study evaluates &amp;ldquo;Silver Craft Journey,&amp;rdquo; a location-based augmented reality (LBAR) system designed to revitalize cultural engagement and enhance cultural-heritage experience through context-aware, gamified exploration. A quasi-experimental field study with 254 participants across three age groups examined the system&amp;rsquo;s impact on cultural-heritage experience, knowledge acquisition, and real-world engagement. Results demonstrate substantial knowledge gains, with a mean increase of 7.74 points (SD = 4.37) and a large effect size (Cohen&amp;rsquo;s d = 1.77), supporting the effectiveness of LBAR in supporting tangible and intangible heritage understanding. Behavioral log data reveal clear age-related engagement patterns: older participants (41&amp;ndash;51) showed declining mission completion rates and reduced interaction times at later points of interest, which may reflect increased cognitive and physical demands during extended AR navigation under real-world conditions. These findings underscore the potential of location-based AR to enhance cultural-heritage experience in real-world settings while highlighting the importance of age-adaptive interaction and route-design strategies. The study contributes a replicable model for integrating digital tourism, embodied AR experience, and community-based heritage preservation.</description>
	<pubDate>2026-01-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 12: Can Location-Based Augmented Reality Support Cultural-Heritage Experience in Real-World Settings? Age-Related Engagement Patterns and a Field-Based Evaluation</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/12">doi: 10.3390/informatics13010012</a></p>
	<p>Authors:
		Phichete Julrode
		Darin Poollapalin
		Sumalee Sangamuang
		Kannikar Intawong
		Kitti Puritat
		</p>
	<p>The Wua-Lai silvercraft community in Chiang Mai is experiencing a widening disconnect with younger visitors, raising concerns about the erosion of intangible cultural heritage. This study evaluates &amp;ldquo;Silver Craft Journey,&amp;rdquo; a location-based augmented reality (LBAR) system designed to revitalize cultural engagement and enhance cultural-heritage experience through context-aware, gamified exploration. A quasi-experimental field study with 254 participants across three age groups examined the system&amp;rsquo;s impact on cultural-heritage experience, knowledge acquisition, and real-world engagement. Results demonstrate substantial knowledge gains, with a mean increase of 7.74 points (SD = 4.37) and a large effect size (Cohen&amp;rsquo;s d = 1.77), supporting the effectiveness of LBAR in supporting tangible and intangible heritage understanding. Behavioral log data reveal clear age-related engagement patterns: older participants (41&amp;ndash;51) showed declining mission completion rates and reduced interaction times at later points of interest, which may reflect increased cognitive and physical demands during extended AR navigation under real-world conditions. These findings underscore the potential of location-based AR to enhance cultural-heritage experience in real-world settings while highlighting the importance of age-adaptive interaction and route-design strategies. The study contributes a replicable model for integrating digital tourism, embodied AR experience, and community-based heritage preservation.</p>
	]]></content:encoded>

	<dc:title>Can Location-Based Augmented Reality Support Cultural-Heritage Experience in Real-World Settings? Age-Related Engagement Patterns and a Field-Based Evaluation</dc:title>
			<dc:creator>Phichete Julrode</dc:creator>
			<dc:creator>Darin Poollapalin</dc:creator>
			<dc:creator>Sumalee Sangamuang</dc:creator>
			<dc:creator>Kannikar Intawong</dc:creator>
			<dc:creator>Kitti Puritat</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010012</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-15</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-15</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/informatics13010012</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/11">

	<title>Informatics, Vol. 13, Pages 11: Enhancing Interactive Teaching for the Next Generation of Nurses: Generative-AI-Assisted Design of a Full-Day Professional Development Workshop</title>
	<link>https://www.mdpi.com/2227-9709/13/1/11</link>
	<description>Introduction: Nursing educators and clinical leaders face persistent challenges in engaging the next generation of nurses, often characterized by short attention spans, frequent phone use, and underdeveloped communication skills. This article describes the design and delivery of a full-day interactive teaching workshop for nursing faculty, senior clinical nurses, and nurse leaders, developed using a design-thinking approach supported by generative AI. Methods: The workshop comprised four thematic sessions: (1) Learning styles across generations, (2) Interactive teaching methods, (3) Application of interactive teaching strategies, and (4) Lesson planning and transfer. Generative AI was used during planning to create icebreakers, discussion prompts, clinical teaching scenarios, and application templates. Design decisions emphasized low-tech, low-prep strategies suitable for spontaneous clinical teaching, thereby reducing barriers to adoption. Activities included emoji-card introductions, quick generational polls, colored-paper reflections, portable whiteboard brainstorming, role plays, fishbowl discussions, gallery walks, and movement-based group exercises. Participants (N = 37) were predominantly female (95%) and represented multiple generations of X, Y, and Z. Mid- and end-of-workshop reflection prompts were embedded within Sessions 2 and 4, with participants recording their responses on colored papers, which were then compiled into a single Word document for thematic analysis. Results: Thematic analysis of 59 mid- and end-workshop reflections revealed six interconnected themes, grouped into three categories: (1) engagement and experiential learning, (2) practical applicability and generational awareness, and (3) facilitation, environment, and motivation. Participants emphasized the workshop&amp;rsquo;s lively pace and hands-on design. 
Experiencing strategies firsthand built confidence for application, while generational awareness encouraged reflection on adapting methods for younger learners. The facilitator&amp;rsquo;s passion, personable approach, and structured use of peer learning created a psychologically safe and motivating climate, leaving participants recharged and inspired to integrate interactive methods. Discussion: The workshop illustrates how AI-assisted, design-thinking-driven professional development can model effective strategies for next-generation learners. When paired with skilled facilitation, AI-supported planning enhances engagement, fosters reflective practice, and promotes immediate transfer of interactive strategies into diverse teaching settings.</description>
	<pubDate>2026-01-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 11: Enhancing Interactive Teaching for the Next Generation of Nurses: Generative-AI-Assisted Design of a Full-Day Professional Development Workshop</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/11">doi: 10.3390/informatics13010011</a></p>
	<p>Authors:
		Su-I Hou
		</p>
	<p>Introduction: Nursing educators and clinical leaders face persistent challenges in engaging the next generation of nurses, often characterized by short attention spans, frequent phone use, and underdeveloped communication skills. This article describes the design and delivery of a full-day interactive teaching workshop for nursing faculty, senior clinical nurses, and nurse leaders, developed using a design-thinking approach supported by generative AI. Methods: The workshop comprised four thematic sessions: (1) Learning styles across generations, (2) Interactive teaching methods, (3) Application of interactive teaching strategies, and (4) Lesson planning and transfer. Generative AI was used during planning to create icebreakers, discussion prompts, clinical teaching scenarios, and application templates. Design decisions emphasized low-tech, low-prep strategies suitable for spontaneous clinical teaching, thereby reducing barriers to adoption. Activities included emoji-card introductions, quick generational polls, colored-paper reflections, portable whiteboard brainstorming, role plays, fishbowl discussions, gallery walks, and movement-based group exercises. Participants (N = 37) were predominantly female (95%) and represented multiple generations of X, Y, and Z. Mid- and end-of-workshop reflection prompts were embedded within Sessions 2 and 4, with participants recording their responses on colored papers, which were then compiled into a single Word document for thematic analysis. Results: Thematic analysis of 59 mid- and end-workshop reflections revealed six interconnected themes, grouped into three categories: (1) engagement and experiential learning, (2) practical applicability and generational awareness, and (3) facilitation, environment, and motivation. Participants emphasized the workshop&amp;rsquo;s lively pace and hands-on design. 
Experiencing strategies firsthand built confidence for application, while generational awareness encouraged reflection on adapting methods for younger learners. The facilitator&amp;rsquo;s passion, personable approach, and structured use of peer learning created a psychologically safe and motivating climate, leaving participants recharged and inspired to integrate interactive methods. Discussion: The workshop illustrates how AI-assisted, design-thinking-driven professional development can model effective strategies for next-generation learners. When paired with skilled facilitation, AI-supported planning enhances engagement, fosters reflective practice, and promotes immediate transfer of interactive strategies into diverse teaching settings.</p>
	]]></content:encoded>

	<dc:title>Enhancing Interactive Teaching for the Next Generation of Nurses: Generative-AI-Assisted Design of a Full-Day Professional Development Workshop</dc:title>
			<dc:creator>Su-I Hou</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010011</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-15</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-15</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Brief Report</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/informatics13010011</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/10">

	<title>Informatics, Vol. 13, Pages 10: A Review of Multimodal Sentiment Analysis in Online Public Opinion Monitoring</title>
	<link>https://www.mdpi.com/2227-9709/13/1/10</link>
	<description>With the rapid development of the Internet, online public opinion monitoring has emerged as a crucial task in the information era. Multimodal sentiment analysis, through the integration of multiple modalities such as text, images, and audio, combined with technologies including natural language processing and computer vision, offers novel technical means for online public opinion monitoring. Nevertheless, current research still faces many challenges, such as the scarcity of high-quality datasets, limited model generalization ability, and difficulties with cross-modal feature fusion. This paper reviews the current research progress of multimodal sentiment analysis in online public opinion monitoring, including its development history, key technologies, and application scenarios. Existing problems are analyzed and future research directions are discussed. In particular, we emphasize a fusion-architecture-centric comparison under online public opinion monitoring, and discuss cross-lingual differences that affect multimodal alignment and evaluation.</description>
	<pubDate>2026-01-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 10: A Review of Multimodal Sentiment Analysis in Online Public Opinion Monitoring</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/10">doi: 10.3390/informatics13010010</a></p>
	<p>Authors:
		Shuxian Liu
		Tianyi Li
		</p>
	<p>With the rapid development of the Internet, online public opinion monitoring has emerged as a crucial task in the information era. Multimodal sentiment analysis, through the integration of multiple modalities such as text, images, and audio, combined with technologies including natural language processing and computer vision, offers novel technical means for online public opinion monitoring. Nevertheless, current research still faces many challenges, such as the scarcity of high-quality datasets, limited model generalization ability, and difficulties with cross-modal feature fusion. This paper reviews the current research progress of multimodal sentiment analysis in online public opinion monitoring, including its development history, key technologies, and application scenarios. Existing problems are analyzed and future research directions are discussed. In particular, we emphasize a fusion-architecture-centric comparison under online public opinion monitoring, and discuss cross-lingual differences that affect multimodal alignment and evaluation.</p>
	]]></content:encoded>

	<dc:title>A Review of Multimodal Sentiment Analysis in Online Public Opinion Monitoring</dc:title>
			<dc:creator>Shuxian Liu</dc:creator>
			<dc:creator>Tianyi Li</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010010</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-14</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-14</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/informatics13010010</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/9">

	<title>Informatics, Vol. 13, Pages 9: Knowledge Organization of Buddhist Learning Resources for Tourism: Virtual Tour of Wat Phra Pathom Chedi</title>
	<link>https://www.mdpi.com/2227-9709/13/1/9</link>
	<description>This study curates and structures knowledge concerning Buddhist learning resources for tourism, presenting it through a virtual tour of Wat Phra Pathom Chedi Ratchaworamahawihan in Nakhon Pathom Province. Employing a mixed-methods approach that integrates both qualitative and quantitative methodologies, the research first establishes a structured knowledge base. This involves developing a comprehensive metadata schema for cataloging the temple&amp;rsquo;s diverse resources, including both sacred sites and artifacts, to enhance their searchability and accessibility. Subsequently, this knowledge is rendered into a virtual tour, which serves as an exemplary model of a Buddhist digital learning resource for tourism. The findings reveal the extensive diversity of resources within the temple. The developed virtual tour platform allows users an immersive exploration of the site via 360-degree panoramic views. This research presents significant implications for relevant agencies, offering a scalable model for the digital dissemination of cultural heritage. It is anticipated that this initiative will expand global access to and appreciation of the temple&amp;rsquo;s cultural value, thereby fostering international interest in visitation. Such engagement is poised to stimulate the local economy and bolster Thailand&amp;rsquo;s image as a premier cultural tourism destination.</description>
	<pubDate>2026-01-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 9: Knowledge Organization of Buddhist Learning Resources for Tourism: Virtual Tour of Wat Phra Pathom Chedi</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/9">doi: 10.3390/informatics13010009</a></p>
	<p>Authors:
		Bulan Kulavijit
		Wirapong Chansanam
		Kannikar Intawong
		Kitti Puritat
		</p>
	<p>This study curates and structures knowledge concerning Buddhist learning resources for tourism, presenting it through a virtual tour of Wat Phra Pathom Chedi Ratchaworamahawihan in Nakhon Pathom Province. Employing a mixed-methods approach that integrates both qualitative and quantitative methodologies, the research first establishes a structured knowledge base. This involves developing a comprehensive metadata schema for cataloging the temple&amp;rsquo;s diverse resources, including both sacred sites and artifacts, to enhance their searchability and accessibility. Subsequently, this knowledge is rendered into a virtual tour, which serves as an exemplary model of a Buddhist digital learning resource for tourism. The findings reveal the extensive diversity of resources within the temple. The developed virtual tour platform allows users an immersive exploration of the site via 360-degree panoramic views. This research presents significant implications for relevant agencies, offering a scalable model for the digital dissemination of cultural heritage. It is anticipated that this initiative will expand global access to and appreciation of the temple&amp;rsquo;s cultural value, thereby fostering international interest in visitation. Such engagement is poised to stimulate the local economy and bolster Thailand&amp;rsquo;s image as a premier cultural tourism destination.</p>
	]]></content:encoded>

	<dc:title>Knowledge Organization of Buddhist Learning Resources for Tourism: Virtual Tour of Wat Phra Pathom Chedi</dc:title>
			<dc:creator>Bulan Kulavijit</dc:creator>
			<dc:creator>Wirapong Chansanam</dc:creator>
			<dc:creator>Kannikar Intawong</dc:creator>
			<dc:creator>Kitti Puritat</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010009</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-13</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-13</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/informatics13010009</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/8">

	<title>Informatics, Vol. 13, Pages 8: Depression Detection Method Based on Multi-Modal Multi-Layer Collaborative Perception Attention Mechanism of Symmetric Structure</title>
	<link>https://www.mdpi.com/2227-9709/13/1/8</link>
	<description>Depression is a mental illness with hidden characteristics that affects human physical and mental health. In severe cases, it may lead to suicidal behavior (for example, among college students and social groups). Therefore, it has attracted widespread attention. Scholars have developed numerous models and methods for depression detection. However, most of these methods focus on a single modality and do not consider the influence of gender on depression, while the existing models have limitations such as complex structures. To solve this problem, we propose a symmetric-structured, multi-modal, multi-layer cooperative perception model for depression detection that dynamically focuses on critical features. First, the double-branch symmetric structure of the proposed model is designed to account for gender-based variations in emotional factors. Second, we introduce a stacked multi-head attention (MHA) module and an interactive cross-attention module to comprehensively extract key features while suppressing irrelevant information. A bidirectional long short-term memory network (BiLSTM) module enhances depression detection accuracy. To verify the effectiveness and feasibility of the model, we conducted a series of experiments using the proposed method on the AVEC 2014 dataset. Compared with the most advanced HMTL-IMHAFF model, our model improves the accuracy by 0.0308. The results indicate that the proposed framework demonstrates superior performance.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 8: Depression Detection Method Based on Multi-Modal Multi-Layer Collaborative Perception Attention Mechanism of Symmetric Structure</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/8">doi: 10.3390/informatics13010008</a></p>
	<p>Authors:
		Shaorong Jiang
		Chengjun Xu
		Xiuya Fang
		</p>
	<p>Depression is a mental illness with hidden characteristics that affects human physical and mental health. In severe cases, it may lead to suicidal behavior (for example, among college students and social groups). Therefore, it has attracted widespread attention. Scholars have developed numerous models and methods for depression detection. However, most of these methods focus on a single modality and do not consider the influence of gender on depression, while the existing models have limitations such as complex structures. To solve this problem, we propose a symmetric-structured, multi-modal, multi-layer cooperative perception model for depression detection that dynamically focuses on critical features. First, the double-branch symmetric structure of the proposed model is designed to account for gender-based variations in emotional factors. Second, we introduce a stacked multi-head attention (MHA) module and an interactive cross-attention module to comprehensively extract key features while suppressing irrelevant information. A bidirectional long short-term memory network (BiLSTM) module enhances depression detection accuracy. To verify the effectiveness and feasibility of the model, we conducted a series of experiments using the proposed method on the AVEC 2014 dataset. Compared with the most advanced HMTL-IMHAFF model, our model improves the accuracy by 0.0308. The results indicate that the proposed framework demonstrates superior performance.</p>
	]]></content:encoded>

	<dc:title>Depression Detection Method Based on Multi-Modal Multi-Layer Collaborative Perception Attention Mechanism of Symmetric Structure</dc:title>
			<dc:creator>Shaorong Jiang</dc:creator>
			<dc:creator>Chengjun Xu</dc:creator>
			<dc:creator>Xiuya Fang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010008</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/informatics13010008</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/7">

	<title>Informatics, Vol. 13, Pages 7: A Novel MBPSO&amp;ndash;BDGWO Ensemble Feature Selection Method for High-Dimensional Classification Data</title>
	<link>https://www.mdpi.com/2227-9709/13/1/7</link>
	<description>In a high-dimensional classification dataset, feature selection is crucial for improving classification performance and computational efficiency by identifying an informative subset of features while reducing noise, redundancy, and overfitting. This study proposes a novel metaheuristic-based ensemble feature selection approach by combining the complementary strengths of Modified Binary Particle Swarm Optimization (MBPSO) and Binary Dynamic Grey Wolf Optimization (BDGWO). The proposed MBPSO&amp;ndash;BDGWO ensemble method is specifically designed for high-dimensional classification problems. The performance of the proposed MBPSO&amp;ndash;BDGWO ensemble method was rigorously evaluated through an extensive simulation study under multiple high-dimensional scenarios with varying correlation structures. The ensemble method was further validated on several real datasets. Comparative analyses were conducted against single-stage feature selection methods, including BPSO, BGWO, MBPSO, and BDGWO, using evaluation metrics such as accuracy, the F1-score, the true positive rate (TPR), the false positive rate (FPR), the AUC, precision, and the Jaccard stability index. Simulation studies conducted under various dimensionality and correlation scenarios show that the proposed ensemble method achieves a low FPR, a high TPR/Precision/F1/AUC, and strong selection stability, clearly outperforming both classical and advanced single-stage methods, even as dimensionality and collinearity increase. In contrast, single-stage methods typically experience substantial performance degradation in high-correlation and high-dimensional settings, particularly BPSO and BGWO. Moreover, on the real datasets, the ensemble method outperformed all compared single-stage methods and produced consistently low MAD values across repetitions, indicating robustness and stability even in ultra-high-dimensional genomic datasets. 
Overall, the findings indicate that the proposed ensemble method demonstrates consistent performance across the evaluated scenarios and achieves higher selection stability compared with the single-stage methods.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 7: A Novel MBPSO&ndash;BDGWO Ensemble Feature Selection Method for High-Dimensional Classification Data</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/7">doi: 10.3390/informatics13010007</a></p>
	<p>Authors:
		Nuriye Sancar
		</p>
	<p>In a high-dimensional classification dataset, feature selection is crucial for improving classification performance and computational efficiency by identifying an informative subset of features while reducing noise, redundancy, and overfitting. This study proposes a novel metaheuristic-based ensemble feature selection approach by combining the complementary strengths of Modified Binary Particle Swarm Optimization (MBPSO) and Binary Dynamic Grey Wolf Optimization (BDGWO). The proposed MBPSO&ndash;BDGWO ensemble method is specifically designed for high-dimensional classification problems. The performance of the proposed MBPSO&ndash;BDGWO ensemble method was rigorously evaluated through an extensive simulation study under multiple high-dimensional scenarios with varying correlation structures. The ensemble method was further validated on several real datasets. Comparative analyses were conducted against single-stage feature selection methods, including BPSO, BGWO, MBPSO, and BDGWO, using evaluation metrics such as accuracy, the F1-score, the true positive rate (TPR), the false positive rate (FPR), the AUC, precision, and the Jaccard stability index. Simulation studies conducted under various dimensionality and correlation scenarios show that the proposed ensemble method achieves a low FPR, a high TPR/Precision/F1/AUC, and strong selection stability, clearly outperforming both classical and advanced single-stage methods, even as dimensionality and collinearity increase. In contrast, single-stage methods typically experience substantial performance degradation in high-correlation and high-dimensional settings, particularly BPSO and BGWO. Moreover, on the real datasets, the ensemble method outperformed all compared single-stage methods and produced consistently low MAD values across repetitions, indicating robustness and stability even in ultra-high-dimensional genomic datasets. 
Overall, the findings indicate that the proposed ensemble method demonstrates consistent performance across the evaluated scenarios and achieves higher selection stability compared with the single-stage methods.</p>
	]]></content:encoded>

	<dc:title>A Novel MBPSO&amp;ndash;BDGWO Ensemble Feature Selection Method for High-Dimensional Classification Data</dc:title>
			<dc:creator>Nuriye Sancar</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010007</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/informatics13010007</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/6">

	<title>Informatics, Vol. 13, Pages 6: Second-Opinion Systems for Rare Diseases: A Scoping Review of Digital Workflows and Networks</title>
	<link>https://www.mdpi.com/2227-9709/13/1/6</link>
	<description>Introduction: Rare diseases disperse expertise across institutions and borders, making structured second-opinion systems a pragmatic way to concentrate subspecialty knowledge and reduce diagnostic delays. This scoping review mapped the design, governance, adoption, and impacts of such services across implementation scales. Objectives: To describe how second-opinion services for rare diseases are organized and governed, to characterize technological and workflow models, to summarize benefits and barriers, and to identify priority evidence gaps for implementation. Methods: Using a population&amp;ndash;concept&amp;ndash;context approach, we included peer-reviewed studies describing implemented second-opinion systems for rare diseases and excluded isolated case reports, purely conceptual proposals, and work outside this focus. Searches in August 2025 covered PubMed/MEDLINE, Scopus, Web of Science Core Collection, Cochrane Library, IEEE Xplore, ACM Digital Library, and LILACS without date limits and were restricted to English, Portuguese, or Spanish. Two reviewers screened independently, and the data were charted with a standardized, piloted form. No formal critical appraisal was undertaken, and the synthesis was descriptive. Results: Initiatives were clustered by scale (European networks, national programs, regional systems, international collaborations) and favored hybrid models over asynchronous and synchronous ones. Across settings, services shared reproducible workflows and provided faster access to expertise, quicker decision-making, and more frequent clarification of care plans. These improvements were enabled by transparent governance and dedicated support but were constrained by platform complexity, the effort required to assemble panels, uneven incentives, interoperability gaps, and medico-legal uncertainty. Conclusions: Systematized second-opinion services for rare diseases are feasible and clinically relevant. 
Progress hinges on usability, aligned incentives, and pragmatic interoperability, advancing from registries toward bidirectional electronic health record connections, alongside prospective evaluations of outcomes, equity, experience, effectiveness, and costs.</description>
	<pubDate>2026-01-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 6: Second-Opinion Systems for Rare Diseases: A Scoping Review of Digital Workflows and Networks</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/6">doi: 10.3390/informatics13010006</a></p>
	<p>Authors:
		Vinícius Lima
		Mariana Mozini
		Domingos Alves
		</p>
	<p>Introduction: Rare diseases disperse expertise across institutions and borders, making structured second-opinion systems a pragmatic way to concentrate subspecialty knowledge and reduce diagnostic delays. This scoping review mapped the design, governance, adoption, and impacts of such services across implementation scales. Objectives: To describe how second-opinion services for rare diseases are organized and governed, to characterize technological and workflow models, to summarize benefits and barriers, and to identify priority evidence gaps for implementation. Methods: Using a population&ndash;concept&ndash;context approach, we included peer-reviewed studies describing implemented second-opinion systems for rare diseases and excluded isolated case reports, purely conceptual proposals, and work outside this focus. Searches in August 2025 covered PubMed/MEDLINE, Scopus, Web of Science Core Collection, Cochrane Library, IEEE Xplore, ACM Digital Library, and LILACS without date limits and were restricted to English, Portuguese, or Spanish. Two reviewers screened independently, and the data were charted with a standardized, piloted form. No formal critical appraisal was undertaken, and the synthesis was descriptive. Results: Initiatives were clustered by scale (European networks, national programs, regional systems, international collaborations) and favored hybrid models over asynchronous and synchronous ones. Across settings, services shared reproducible workflows and provided faster access to expertise, quicker decision-making, and more frequent clarification of care plans. These improvements were enabled by transparent governance and dedicated support but were constrained by platform complexity, the effort required to assemble panels, uneven incentives, interoperability gaps, and medico-legal uncertainty. Conclusions: Systematized second-opinion services for rare diseases are feasible and clinically relevant. 
Progress hinges on usability, aligned incentives, and pragmatic interoperability, advancing from registries toward bidirectional electronic health record connections, alongside prospective evaluations of outcomes, equity, experience, effectiveness, and costs.</p>
	]]></content:encoded>

	<dc:title>Second-Opinion Systems for Rare Diseases: A Scoping Review of Digital Workflows and Networks</dc:title>
			<dc:creator>Vinícius Lima</dc:creator>
			<dc:creator>Mariana Mozini</dc:creator>
			<dc:creator>Domingos Alves</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010006</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-10</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-10</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/informatics13010006</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/5">

	<title>Informatics, Vol. 13, Pages 5: Visual Harmony Between Avatar Appearance and On-Avatar Text: Effects on Self-Expression Fit and Interpersonal Perception in Social VR</title>
	<link>https://www.mdpi.com/2227-9709/13/1/5</link>
	<description>In social virtual reality (VR) and metaverse platforms, users express their identity through both avatar appearance and on-avatar textual cues, such as speech balloons. However, little is known about how the harmony between these cues influences self-representation and social impressions. We propose that when avatar appearance and text design, including color, font, and tone, are consistent, users experience a stronger self-expression fit and elicit greater interpersonal affinity. A within-subject study (N=21) in VRChat manipulated the social context, color harmony between avatar hair and text, and style or content consistency between tone and font. Questionnaires provided composite indices for perceived congruence, self-expression fit, and affinity. Analyses included repeated-measures ANOVA, linear mixed-effects models, and mediation tests. Results showed that congruent pairings increased both self-expression fit and affinity compared to mismatches, with mediation analyses indicating that self-expression fit fully mediated the effect. These findings integrate theories of avatar influence and computer-mediated communication into a framework for metaverse design, highlighting the value of consistent avatar and text styling.</description>
	<pubDate>2026-01-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 5: Visual Harmony Between Avatar Appearance and On-Avatar Text: Effects on Self-Expression Fit and Interpersonal Perception in Social VR</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/5">doi: 10.3390/informatics13010005</a></p>
	<p>Authors:
		Yang Guang
		Sho Sakurai
		Takuya Nojima
		Koichi Hirota
		</p>
	<p>In social virtual reality (VR) and metaverse platforms, users express their identity through both avatar appearance and on-avatar textual cues, such as speech balloons. However, little is known about how the harmony between these cues influences self-representation and social impressions. We propose that when avatar appearance and text design, including color, font, and tone, are consistent, users experience a stronger self-expression fit and elicit greater interpersonal affinity. A within-subject study (N=21) in VRChat manipulated the social context, color harmony between avatar hair and text, and style or content consistency between tone and font. Questionnaires provided composite indices for perceived congruence, self-expression fit, and affinity. Analyses included repeated-measures ANOVA, linear mixed-effects models, and mediation tests. Results showed that congruent pairings increased both self-expression fit and affinity compared to mismatches, with mediation analyses indicating that self-expression fit fully mediated the effect. These findings integrate theories of avatar influence and computer-mediated communication into a framework for metaverse design, highlighting the value of consistent avatar and text styling.</p>
	]]></content:encoded>

	<dc:title>Visual Harmony Between Avatar Appearance and On-Avatar Text: Effects on Self-Expression Fit and Interpersonal Perception in Social VR</dc:title>
			<dc:creator>Yang Guang</dc:creator>
			<dc:creator>Sho Sakurai</dc:creator>
			<dc:creator>Takuya Nojima</dc:creator>
			<dc:creator>Koichi Hirota</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010005</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-07</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-07</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/informatics13010005</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/4">

	<title>Informatics, Vol. 13, Pages 4: C-STEER: A Dynamic Sentiment-Aware Framework for Fake News Detection with Lifecycle Emotional Evolution</title>
	<link>https://www.mdpi.com/2227-9709/13/1/4</link>
	<description>The dynamic evolution of collective emotions across the news dissemination life-cycle is a powerful yet underexplored signal in affective computing. While phenomena like the spread of fake news depend on eliciting specific emotional trajectories, existing methods often fail to capture these crucial dynamic affective cues. Many approaches focus on static text or propagation topology, limiting their robustness and failing to model the complete emotional life-cycle for applications such as assessing veracity. This paper introduces C-STEER (Cycle-aware Sentiment-Temporal Emotion Evolution), a novel framework grounded in communication theory, designed to model the characteristic initiation, burst, and decay stages of these emotional arcs. Guided by Diffusion of Innovations Theory, C-STEER first segments an information cascade into its life-cycle phases. It then operationalizes insights from Uses and Gratifications Theory and Emotional Contagion Theory to extract stage-specific emotional features and model their temporal dependencies using a Bidirectional Long Short-Term Memory (BiLSTM). To validate the framework&amp;rsquo;s descriptive and predictive power, we apply it to the challenging domain of fake news detection. Experiments on the Weibo21 and Twitter16 datasets demonstrate that modeling life-cycle emotion dynamics significantly improves detection performance, achieving F1-macro scores of 91.6% and 90.1%, respectively, outperforming state-of-the-art baselines by margins of 1.6% to 2.4%. This work validates the C-STEER framework as an effective approach for the computational modeling of collective emotion life-cycles.</description>
	<pubDate>2026-01-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 4: C-STEER: A Dynamic Sentiment-Aware Framework for Fake News Detection with Lifecycle Emotional Evolution</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/4">doi: 10.3390/informatics13010004</a></p>
	<p>Authors:
		Ziyi Zhen
		Ying Li
		</p>
	<p>The dynamic evolution of collective emotions across the news dissemination life-cycle is a powerful yet underexplored signal in affective computing. While phenomena like the spread of fake news depend on eliciting specific emotional trajectories, existing methods often fail to capture these crucial dynamic affective cues. Many approaches focus on static text or propagation topology, limiting their robustness and failing to model the complete emotional life-cycle for applications such as assessing veracity. This paper introduces C-STEER (Cycle-aware Sentiment-Temporal Emotion Evolution), a novel framework grounded in communication theory, designed to model the characteristic initiation, burst, and decay stages of these emotional arcs. Guided by Diffusion of Innovations Theory, C-STEER first segments an information cascade into its life-cycle phases. It then operationalizes insights from Uses and Gratifications Theory and Emotional Contagion Theory to extract stage-specific emotional features and model their temporal dependencies using a Bidirectional Long Short-Term Memory (BiLSTM). To validate the framework&rsquo;s descriptive and predictive power, we apply it to the challenging domain of fake news detection. Experiments on the Weibo21 and Twitter16 datasets demonstrate that modeling life-cycle emotion dynamics significantly improves detection performance, achieving F1-macro scores of 91.6% and 90.1%, respectively, outperforming state-of-the-art baselines by margins of 1.6% to 2.4%. This work validates the C-STEER framework as an effective approach for the computational modeling of collective emotion life-cycles.</p>
	]]></content:encoded>

	<dc:title>C-STEER: A Dynamic Sentiment-Aware Framework for Fake News Detection with Lifecycle Emotional Evolution</dc:title>
			<dc:creator>Ziyi Zhen</dc:creator>
			<dc:creator>Ying Li</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010004</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-05</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/informatics13010004</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/3">

	<title>Informatics, Vol. 13, Pages 3: A Clustering Approach to Identify Risk Perception on Social Networks: A Study of Peruvian Children and Adolescents</title>
	<link>https://www.mdpi.com/2227-9709/13/1/3</link>
	<description>The excessive and inappropriate use of the internet by children and young people increases their exposure to risky situations, especially since the COVID-19 pandemic. This study analyzes risky situations on social media among children and adolescents. The objective of this work was to identify the risks associated with the use of social media. A comparative analysis of five clustering algorithms was applied to a dataset developed by eBiz Latin America in collaboration with La Salle University of Arequipa and the Institute of Christian Schools of the De La Salle Brothers of the Bolivia-Peru district. Among the results, it was shown that children around 11 years old display a high prevalence of digital risk behaviors such as adding strangers, followed by pretending to be someone else; adults around 43 years old exhibit a tendency to follow strangers and, even more so, to take photographs without permission; adolescents with an average age of 11 show a heavy use of YouTube, TikTok, and Instagram. It is concluded that among digital risks in children and adults, the clusters highlight shared vulnerabilities, such as the addition of strangers and exposure to requests for personal data, which persist throughout the life stages but intensify in early adulthood. These findings emphasize the urgency of preventive policies addressing generational differences in social network use to promote proactive responses to digital harassment.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 3: A Clustering Approach to Identify Risk Perception on Social Networks: A Study of Peruvian Children and Adolescents</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/3">doi: 10.3390/informatics13010003</a></p>
	<p>Authors:
		Yasiel Pérez Vera
		Richart Smith Escobedo Quispe
		Patrick Andrés Ramírez Santos
		</p>
	<p>The excessive and inappropriate use of the internet by children and young people increases their exposure to risky situations, especially since the COVID-19 pandemic. This study analyzes risky situations on social media among children and adolescents. The objective of this work was to identify the risks associated with the use of social media. A comparative analysis of five clustering algorithms was applied to a dataset developed by eBiz Latin America in collaboration with La Salle University of Arequipa and the Institute of Christian Schools of the De La Salle Brothers of the Bolivia-Peru district. Among the results, it was shown that children around 11 years old display a high prevalence of digital risk behaviors such as adding strangers, followed by pretending to be someone else; adults around 43 years old exhibit a tendency to follow strangers and, even more so, to take photographs without permission; adolescents with an average age of 11 show a heavy use of YouTube, TikTok, and Instagram. It is concluded that among digital risks in children and adults, the clusters highlight shared vulnerabilities, such as the addition of strangers and exposure to requests for personal data, which persist throughout the life stages but intensify in early adulthood. These findings emphasize the urgency of preventive policies addressing generational differences in social network use to promote proactive responses to digital harassment.</p>
	]]></content:encoded>

	<dc:title>A Clustering Approach to Identify Risk Perception on Social Networks: A Study of Peruvian Children and Adolescents</dc:title>
			<dc:creator>Yasiel Pérez Vera</dc:creator>
			<dc:creator>Richart Smith Escobedo Quispe</dc:creator>
			<dc:creator>Patrick Andrés Ramírez Santos</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010003</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/informatics13010003</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/2">

	<title>Informatics, Vol. 13, Pages 2: AIMarkerFinder: AI-Assisted Marker Discovery Based on an Integrated Approach of Autoencoders and Kolmogorov&amp;ndash;Arnold Networks</title>
	<link>https://www.mdpi.com/2227-9709/13/1/2</link>
	<description>In modern bioinformatics, the analysis of high-dimensional data (genomic, metabolomic, etc.) remains a critical challenge due to the &amp;ldquo;curse of dimensionality,&amp;rdquo; where feature redundancy reduces classification efficiency and model interpretability. This study introduces a novel method, AIMarkerFinder (v0.1.0), for analyzing metabolomic data to identify key biomarkers. The method is based on a denoising autoencoder with an attention mechanism (DAE), enabling the extraction of informative features and the elimination of redundancy. Experiments on glioblastoma and adjacent tissue metabolomic data demonstrated that AIMarkerFinder reduces dimensionality from 446 to 4 key features while improving classification accuracy. Using the selected metabolites (Malonyl-CoA, Glycerophosphocholine, SM(d18:1/22:0 OH), GC(18:1/24:1)), the Random Forest and Kolmogorov&amp;ndash;Arnold Networks (KAN) models achieved accuracies of 0.904 and 0.937, respectively. The analytical formulas derived by the KAN provide model interpretability, which is critical for biomedical research. The proposed approach is applicable to genomics, transcriptomics, proteomics, and the study of exogenous factors on biological processes. The study&amp;rsquo;s results open new prospects for personalized medicine and early disease diagnosis.</description>
	<pubDate>2025-12-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 2: AIMarkerFinder: AI-Assisted Marker Discovery Based on an Integrated Approach of Autoencoders and Kolmogorov&ndash;Arnold Networks</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/2">doi: 10.3390/informatics13010002</a></p>
	<p>Authors:
		Pavel S. Demenkov
		Timofey V. Ivanisenko
		Vladimir A. Ivanisenko
		</p>
	<p>In modern bioinformatics, the analysis of high-dimensional data (genomic, metabolomic, etc.) remains a critical challenge due to the &ldquo;curse of dimensionality,&rdquo; where feature redundancy reduces classification efficiency and model interpretability. This study introduces a novel method, AIMarkerFinder (v0.1.0), for analyzing metabolomic data to identify key biomarkers. The method is based on a denoising autoencoder with an attention mechanism (DAE), enabling the extraction of informative features and the elimination of redundancy. Experiments on glioblastoma and adjacent tissue metabolomic data demonstrated that AIMarkerFinder reduces dimensionality from 446 to 4 key features while improving classification accuracy. Using the selected metabolites (Malonyl-CoA, Glycerophosphocholine, SM(d18:1/22:0 OH), GC(18:1/24:1)), the Random Forest and Kolmogorov&ndash;Arnold Networks (KAN) models achieved accuracies of 0.904 and 0.937, respectively. The analytical formulas derived by the KAN provide model interpretability, which is critical for biomedical research. The proposed approach is applicable to genomics, transcriptomics, proteomics, and the study of exogenous factors on biological processes. The study&rsquo;s results open new prospects for personalized medicine and early disease diagnosis.</p>
	]]></content:encoded>

	<dc:title>AIMarkerFinder: AI-Assisted Marker Discovery Based on an Integrated Approach of Autoencoders and Kolmogorov&amp;ndash;Arnold Networks</dc:title>
			<dc:creator>Pavel S. Demenkov</dc:creator>
			<dc:creator>Timofey V. Ivanisenko</dc:creator>
			<dc:creator>Vladimir A. Ivanisenko</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010002</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-24</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/informatics13010002</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/13/1/1">

	<title>Informatics, Vol. 13, Pages 1: SAFE-GUARD: Semantic Access Control Framework Employing Generative User Assessment and Rule Decisions</title>
	<link>https://www.mdpi.com/2227-9709/13/1/1</link>
	<description>Healthcare faces a critical challenge: protecting sensitive medical data while enabling necessary clinical access. Evolving user behaviors, dynamic clinical contexts, and strict regulatory requirements demand adaptive access control mechanisms. Despite strict regulations, healthcare remains the most breached industry, consistently facing severe security risks related to unauthorized access. Traditional access control models cannot handle contextual variations, detect credential compromise, or provide transparent decision rationales. To address this, SAFE-GUARD (Semantic Access Control Framework Employing Generative User Assessment and Rule Decisions) is proposed as a two-layer framework that combines behavioral analysis with policy enforcement. The Behavioral Analysis Layer uses Retrieval-Augmented Generation (RAG) to detect contextual anomalies by comparing current requests against historical patterns. The Rule-Based Policy Evaluation Layer independently validates organizational procedures and regulatory requirements. Access is granted only when behavioral consistency and both organizational and regulatory policies are satisfied. We evaluate SAFE-GUARD using simulated healthcare scenarios with three LLMs (GPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Flash) achieving an anomaly detection accuracy of 95.2%, 94.1%, and 91.3%, respectively. The framework effectively identifies both compromised credentials and insider misuse by detecting deviations from established behavioral patterns, significantly outperforming conventional RBAC and ABAC approaches that rely solely on static rules.</description>
	<pubDate>2025-12-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 13, Pages 1: SAFE-GUARD: Semantic Access Control Framework Employing Generative User Assessment and Rule Decisions</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/13/1/1">doi: 10.3390/informatics13010001</a></p>
	<p>Authors:
		Nastaran Farhadighalati
		Luis A. Estrada-Jimenez
		Sepideh Kalateh
		Sanaz Nikghadam-Hojjati
		Jose Barata
		</p>
	<p>Healthcare faces a critical challenge: protecting sensitive medical data while enabling necessary clinical access. Evolving user behaviors, dynamic clinical contexts, and strict regulatory requirements demand adaptive access control mechanisms. Despite strict regulations, healthcare remains the most breached industry, consistently facing severe security risks related to unauthorized access. Traditional access control models cannot handle contextual variations, detect credential compromise, or provide transparent decision rationales. To address this, SAFE-GUARD (Semantic Access Control Framework Employing Generative User Assessment and Rule Decisions) is proposed as a two-layer framework that combines behavioral analysis with policy enforcement. The Behavioral Analysis Layer uses Retrieval-Augmented Generation (RAG) to detect contextual anomalies by comparing current requests against historical patterns. The Rule-Based Policy Evaluation Layer independently validates organizational procedures and regulatory requirements. Access is granted only when behavioral consistency and both organizational and regulatory policies are satisfied. We evaluate SAFE-GUARD using simulated healthcare scenarios with three LLMs (GPT-4o, Claude 3.5 Sonnet, and Gemini 2.5 Flash) achieving an anomaly detection accuracy of 95.2%, 94.1%, and 91.3%, respectively. The framework effectively identifies both compromised credentials and insider misuse by detecting deviations from established behavioral patterns, significantly outperforming conventional RBAC and ABAC approaches that rely solely on static rules.</p>
	]]></content:encoded>

	<dc:title>SAFE-GUARD: Semantic Access Control Framework Employing Generative User Assessment and Rule Decisions</dc:title>
			<dc:creator>Nastaran Farhadighalati</dc:creator>
			<dc:creator>Luis A. Estrada-Jimenez</dc:creator>
			<dc:creator>Sepideh Kalateh</dc:creator>
			<dc:creator>Sanaz Nikghadam-Hojjati</dc:creator>
			<dc:creator>Jose Barata</dc:creator>
		<dc:identifier>doi: 10.3390/informatics13010001</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-19</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/informatics13010001</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/13/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/141">

	<title>Informatics, Vol. 12, Pages 141: Combining Fuzzy Cognitive Maps and Metaheuristic Algorithms to Predict Preeclampsia and Intrauterine Growth Restriction</title>
	<link>https://www.mdpi.com/2227-9709/12/4/141</link>
	<description>Preeclampsia (PE) and intrauterine growth restriction (IUGR) are obstetric complications associated with placental dysfunction, which represent a public health problem due to high maternal and fetal morbidity and mortality. Early detection is crucial for timely interventions. Therefore, this study proposes the development of models based on fuzzy cognitive maps (FCM) optimized with metaheuristic algorithms (particle swarm optimization (PSO) and genetic algorithms (GA)) for the prediction of PE and IUGR. The results showed that FCM-PSO applied to the PE dataset achieved excellent performance (accuracy, precision, recall, and F1-Score = 1.0). The FCM-GA model excelled in predicting IUGR with an accuracy and F1-Score of 0.97. Our proposed models outperformed those reported in the literature to predict PE and IUGR. Analysis of the relationships between nodes allowed for the identification of influential variables such as sFlt-1, sFlt-1/PlGF, and uterine Doppler parameters, in accordance with the pathophysiology of placental disorders. FCM optimized with PSO and GA offer a viable clinical alternative as a medical decision support system due to their ability to explore nonlinear relationships and interpretability of variables. In addition, they are suitable for scenarios where low computational resource consumption is required.</description>
	<pubDate>2025-12-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 141: Combining Fuzzy Cognitive Maps and Metaheuristic Algorithms to Predict Preeclampsia and Intrauterine Growth Restriction</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/141">doi: 10.3390/informatics12040141</a></p>
	<p>Authors:
		María Paula García
		Jesús David Díaz-Meza
		Kenia Hoyos
		Bethia Pacheco
		Rodrigo García
		William Hoyos
		</p>
	<p>Preeclampsia (PE) and intrauterine growth restriction (IUGR) are obstetric complications associated with placental dysfunction, which represent a public health problem due to high maternal and fetal morbidity and mortality. Early detection is crucial for timely interventions. Therefore, this study proposes the development of models based on fuzzy cognitive maps (FCM) optimized with metaheuristic algorithms (particle swarm optimization (PSO) and genetic algorithms (GA)) for the prediction of PE and IUGR. The results showed that FCM-PSO applied to the PE dataset achieved excellent performance (accuracy, precision, recall, and F1-Score = 1.0). The FCM-GA model excelled in predicting IUGR with an accuracy and F1-Score of 0.97. Our proposed models outperformed those reported in the literature to predict PE and IUGR. Analysis of the relationships between nodes allowed for the identification of influential variables such as sFlt-1, sFlt-1/PlGF, and uterine Doppler parameters, in accordance with the pathophysiology of placental disorders. FCM optimized with PSO and GA offer a viable clinical alternative as a medical decision support system due to their ability to explore nonlinear relationships and interpretability of variables. In addition, they are suitable for scenarios where low computational resource consumption is required.</p>
	]]></content:encoded>

	<dc:title>Combining Fuzzy Cognitive Maps and Metaheuristic Algorithms to Predict Preeclampsia and Intrauterine Growth Restriction</dc:title>
			<dc:creator>María Paula García</dc:creator>
			<dc:creator>Jesús David Díaz-Meza</dc:creator>
			<dc:creator>Kenia Hoyos</dc:creator>
			<dc:creator>Bethia Pacheco</dc:creator>
			<dc:creator>Rodrigo García</dc:creator>
			<dc:creator>William Hoyos</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040141</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-15</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-15</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>141</prism:startingPage>
		<prism:doi>10.3390/informatics12040141</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/141</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/140">

	<title>Informatics, Vol. 12, Pages 140: Vertebra Segmentation and Cobb Angle Calculation Platform for Scoliosis Diagnosis Using Deep Learning: SpineCheck</title>
	<link>https://www.mdpi.com/2227-9709/12/4/140</link>
	<description>This study presents SpineCheck, a fully integrated deep-learning-based clinical decision support platform for automatic vertebra segmentation and Cobb angle (CA) measurement from scoliosis X-ray images. The system unifies end-to-end preprocessing, U-Net-based segmentation, geometry-driven angle computation, and a web-based clinical interface within a single deployable architecture. For secure clinical use, SpineCheck adopts a stateless &amp;amp;ldquo;process-and-delete&amp;amp;rdquo; design, ensuring that no radiographic data or Protected Health Information (PHI) are permanently stored. Five U-Net family models (U-Net, optimized U-Net-2, Attention U-Net, nnU-Net, and UNet3++) are systematically evaluated under identical conditions using Dice similarity, inference speed, GPU memory usage, and deployment stability, enabling deployment-oriented model selection. A robust CA estimation pipeline is developed by combining minimum-area rectangle analysis with Theil&amp;amp;ndash;Sen regression and spline-based anatomical modeling to suppress outliers and improve numerical stability. The system is validated on a large-scale dataset of 20,000 scoliosis X-ray images, demonstrating strong agreement with expert measurements based on Mean Absolute Error, Pearson correlation, and Intraclass Correlation Coefficient metrics. These findings confirm the reliability and clinical robustness of SpineCheck. By integrating large-scale validation, robust geometric modeling, secure stateless processing, and real-time deployment capabilities, SpineCheck provides a scalable and clinically reliable framework for automated scoliosis assessment.</description>
	<pubDate>2025-12-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 140: Vertebra Segmentation and Cobb Angle Calculation Platform for Scoliosis Diagnosis Using Deep Learning: SpineCheck</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/140">doi: 10.3390/informatics12040140</a></p>
	<p>Authors:
		İrfan Harun İlkhan
		Halûk Gümüşkaya
		Firdevs Turgut
		</p>
	<p>This study presents SpineCheck, a fully integrated deep-learning-based clinical decision support platform for automatic vertebra segmentation and Cobb angle (CA) measurement from scoliosis X-ray images. The system unifies end-to-end preprocessing, U-Net-based segmentation, geometry-driven angle computation, and a web-based clinical interface within a single deployable architecture. For secure clinical use, SpineCheck adopts a stateless &amp;amp;ldquo;process-and-delete&amp;amp;rdquo; design, ensuring that no radiographic data or Protected Health Information (PHI) are permanently stored. Five U-Net family models (U-Net, optimized U-Net-2, Attention U-Net, nnU-Net, and UNet3++) are systematically evaluated under identical conditions using Dice similarity, inference speed, GPU memory usage, and deployment stability, enabling deployment-oriented model selection. A robust CA estimation pipeline is developed by combining minimum-area rectangle analysis with Theil&amp;amp;ndash;Sen regression and spline-based anatomical modeling to suppress outliers and improve numerical stability. The system is validated on a large-scale dataset of 20,000 scoliosis X-ray images, demonstrating strong agreement with expert measurements based on Mean Absolute Error, Pearson correlation, and Intraclass Correlation Coefficient metrics. These findings confirm the reliability and clinical robustness of SpineCheck. By integrating large-scale validation, robust geometric modeling, secure stateless processing, and real-time deployment capabilities, SpineCheck provides a scalable and clinically reliable framework for automated scoliosis assessment.</p>
	]]></content:encoded>

	<dc:title>Vertebra Segmentation and Cobb Angle Calculation Platform for Scoliosis Diagnosis Using Deep Learning: SpineCheck</dc:title>
			<dc:creator>İrfan Harun İlkhan</dc:creator>
			<dc:creator>Halûk Gümüşkaya</dc:creator>
			<dc:creator>Firdevs Turgut</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040140</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-11</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-11</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>140</prism:startingPage>
		<prism:doi>10.3390/informatics12040140</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/140</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/139">

	<title>Informatics, Vol. 12, Pages 139: Multimodal Large Language Models vs. Human Authors: A Comparative Study of Chinese Fairy Tales for Young Children</title>
	<link>https://www.mdpi.com/2227-9709/12/4/139</link>
	<description>In the realm of children&amp;amp;rsquo;s education, multimodal large language models (MLLMs) are already being utilized to create educational materials for young learners. But how significant are the differences between image-based fairy tales generated by MLLMs and those crafted by human authors? This paper addresses this question through the design of multi-dimensional human evaluation and actual questionnaire surveys. Specifically, we conducted studies on evaluating MLLM-generated stories and distinguishing them from human-written stories involving 50 undergraduate students in education-related majors, 30 first-grade students, 81 second-grade students, and 103 parents. The findings reveal that most undergraduate students with an educational background, elementary school students, and parents perceive stories generated by MLLMs as being highly similar to those written by humans. Through the evaluation of primary school students and vocabulary analysis, it is further shown that, unlike human-authored stories, which tend to exceed the vocabulary level of young students, MLLM-generated stories are able to control vocabulary complexity and are also very interesting for young readers. Based on the results of the above experiments, we further discuss the following question: Can MLLMs assist or even replace humans in writing Chinese children&amp;amp;rsquo;s fairy tales based on pictures for young children? We approached this question from both a technical perspective and a user perspective.</description>
	<pubDate>2025-12-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 139: Multimodal Large Language Models vs. Human Authors: A Comparative Study of Chinese Fairy Tales for Young Children</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/139">doi: 10.3390/informatics12040139</a></p>
	<p>Authors:
		Jing Du
		Wenhao Liu
		Dibin Zhou
		Seongku Hong
		Fuchang Liu
		</p>
	<p>In the realm of children&amp;amp;rsquo;s education, multimodal large language models (MLLMs) are already being utilized to create educational materials for young learners. But how significant are the differences between image-based fairy tales generated by MLLMs and those crafted by human authors? This paper addresses this question through the design of multi-dimensional human evaluation and actual questionnaire surveys. Specifically, we conducted studies on evaluating MLLM-generated stories and distinguishing them from human-written stories involving 50 undergraduate students in education-related majors, 30 first-grade students, 81 second-grade students, and 103 parents. The findings reveal that most undergraduate students with an educational background, elementary school students, and parents perceive stories generated by MLLMs as being highly similar to those written by humans. Through the evaluation of primary school students and vocabulary analysis, it is further shown that, unlike human-authored stories, which tend to exceed the vocabulary level of young students, MLLM-generated stories are able to control vocabulary complexity and are also very interesting for young readers. Based on the results of the above experiments, we further discuss the following question: Can MLLMs assist or even replace humans in writing Chinese children&amp;amp;rsquo;s fairy tales based on pictures for young children? We approached this question from both a technical perspective and a user perspective.</p>
	]]></content:encoded>

	<dc:title>Multimodal Large Language Models vs. Human Authors: A Comparative Study of Chinese Fairy Tales for Young Children</dc:title>
			<dc:creator>Jing Du</dc:creator>
			<dc:creator>Wenhao Liu</dc:creator>
			<dc:creator>Dibin Zhou</dc:creator>
			<dc:creator>Seongku Hong</dc:creator>
			<dc:creator>Fuchang Liu</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040139</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-09</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-09</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>139</prism:startingPage>
		<prism:doi>10.3390/informatics12040139</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/139</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/138">

	<title>Informatics, Vol. 12, Pages 138: AI-Enabled Intelligent System for Automatic Detection and Classification of Plant Diseases Towards Precision Agriculture</title>
	<link>https://www.mdpi.com/2227-9709/12/4/138</link>
	<description>Technology-driven agriculture, or precision agriculture (PA), is indispensable in the contemporary world due to its advantages and the availability of technological innovations. Particularly, early disease detection in agricultural crops helps the farming community ensure crop health, reduce expenditure, and increase crop yield. Governments have mainly used current systems for agricultural statistics and strategic decision-making, but there is still a critical need for farmers to have access to cost-effective, user-friendly solutions that can be used by them regardless of their educational level. In this study, we used four apple leaf diseases (leaf spot, mosaic, rust and brown spot) from the PlantVillage dataset to develop an Automated Agricultural Crop Disease Identification System (AACDIS), a deep learning framework for identifying and categorizing crop diseases. This framework makes use of deep convolutional neural networks (CNNs) and includes three CNN models created specifically for this application. AACDIS achieves significant performance improvements by combining cascade inception and drawing inspiration from the well-known AlexNet design, making it a potent tool for managing agricultural diseases. AACDIS also has Region of Interest (ROI) awareness, a crucial component that improves the efficiency and precision of illness identification. This feature guarantees that the system can quickly and accurately identify illness-related areas inside images, enabling faster and more accurate disease diagnosis. Experimental findings show a test accuracy of 99.491%, which is better than many state-of-the-art deep learning models. This empirical study reveals the potential benefits of the proposed system for early identification of diseases. This research triggers further investigation to realize full-fledged precision agriculture and smart agriculture.</description>
	<pubDate>2025-12-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 138: AI-Enabled Intelligent System for Automatic Detection and Classification of Plant Diseases Towards Precision Agriculture</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/138">doi: 10.3390/informatics12040138</a></p>
	<p>Authors:
		Gujju Siva Krishna
		Zameer Gulzar
		Arpita Baronia
		Jagirdar Srinivas
		Padmavathy Paramanandam
		Kasharaju Balakrishna
		</p>
	<p>Technology-driven agriculture, or precision agriculture (PA), is indispensable in the contemporary world due to its advantages and the availability of technological innovations. Particularly, early disease detection in agricultural crops helps the farming community ensure crop health, reduce expenditure, and increase crop yield. Governments have mainly used current systems for agricultural statistics and strategic decision-making, but there is still a critical need for farmers to have access to cost-effective, user-friendly solutions that can be used by them regardless of their educational level. In this study, we used four apple leaf diseases (leaf spot, mosaic, rust and brown spot) from the PlantVillage dataset to develop an Automated Agricultural Crop Disease Identification System (AACDIS), a deep learning framework for identifying and categorizing crop diseases. This framework makes use of deep convolutional neural networks (CNNs) and includes three CNN models created specifically for this application. AACDIS achieves significant performance improvements by combining cascade inception and drawing inspiration from the well-known AlexNet design, making it a potent tool for managing agricultural diseases. AACDIS also has Region of Interest (ROI) awareness, a crucial component that improves the efficiency and precision of illness identification. This feature guarantees that the system can quickly and accurately identify illness-related areas inside images, enabling faster and more accurate disease diagnosis. Experimental findings show a test accuracy of 99.491%, which is better than many state-of-the-art deep learning models. This empirical study reveals the potential benefits of the proposed system for early identification of diseases. This research triggers further investigation to realize full-fledged precision agriculture and smart agriculture.</p>
	]]></content:encoded>

	<dc:title>AI-Enabled Intelligent System for Automatic Detection and Classification of Plant Diseases Towards Precision Agriculture</dc:title>
			<dc:creator>Gujju Siva Krishna</dc:creator>
			<dc:creator>Zameer Gulzar</dc:creator>
			<dc:creator>Arpita Baronia</dc:creator>
			<dc:creator>Jagirdar Srinivas</dc:creator>
			<dc:creator>Padmavathy Paramanandam</dc:creator>
			<dc:creator>Kasharaju Balakrishna</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040138</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-08</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-08</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>138</prism:startingPage>
		<prism:doi>10.3390/informatics12040138</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/138</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/137">

	<title>Informatics, Vol. 12, Pages 137: Mapping the AI Surge in Higher Education: A Bibliometric Study Spanning a Decade (2015–2025)</title>
	<link>https://www.mdpi.com/2227-9709/12/4/137</link>
	<description>There has recently been a pronounced global escalation in scholarly output concerning Artificial Intelligence (AI) within the context of higher education (HE). However, the precise locus of this growth remains ambiguous, thereby hindering the systematic integration of critical AI trends into HE practices. To address this opacity, the present study adopts a rigorous and impartial analytical approach by synthesizing datasets from the Web of Science (WoS) and Scopus through the Biblioshiny platform. In addition, independent examinations of WoS and Scopus data were conducted using co-occurrence network analyses in VOSviewer, which revealed comparable patterns of cluster strength across both datasets. Complementing these methods, Latent Dirichlet Allocation (LDA) was employed to extract and interpret thematic structures within locally cited references, thereby providing deeper insights into the extant research discourse. Findings revealed significant acceleration patterns from 2023 concerning publication trends, annual growth patterns, cited references, top authors, leading journals, and leading countries. Patterns of strengths from co-occurrence networks in VOSviewer revealed growing interest in generative AI tools, AI ethics, and concerns about AI integration into the curriculum in HE. The LDA analysis identified two dominant themes: the pedagogical integration of generative AI tools and broader academic discourse on AI ethics that correlated with the VOSviewer findings. This enhanced the credibility, reliability, and validity of the bibliometric techniques applied in the study. Recommendations and future directions offer valuable insights for policymakers and stakeholders to address pedagogical integration of generative AI tools in HE. The development of frameworks and ethical guidelines are important to address fair and transparent adoption of AI in HE. 
Further, global inequalities in adoption, aligning with UNESCO’s Sustainable Development Goals, are crucial to ensure equitable and responsible AI integration in HE.</description>
	<pubDate>2025-12-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 137: Mapping the AI Surge in Higher Education: A Bibliometric Study Spanning a Decade (2015–2025)</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/137">doi: 10.3390/informatics12040137</a></p>
	<p>Authors:
		Mousin Omarsaib
		Sara Bibi Mitha
		Anisa Vahed
		Ghulam Masudh Mohamed
		</p>
	<p>There has recently been a pronounced global escalation in scholarly output concerning Artificial Intelligence (AI) within the context of higher education (HE). However, the precise locus of this growth remains ambiguous, thereby hindering the systematic integration of critical AI trends into HE practices. To address this opacity, the present study adopts a rigorous and impartial analytical approach by synthesizing datasets from the Web of Science (WoS) and Scopus through the Biblioshiny platform. In addition, independent examinations of WoS and Scopus data were conducted using co-occurrence network analyses in VOSviewer, which revealed comparable patterns of cluster strength across both datasets. Complementing these methods, Latent Dirichlet Allocation (LDA) was employed to extract and interpret thematic structures within locally cited references, thereby providing deeper insights into the extant research discourse. Findings revealed significant acceleration patterns from 2023 concerning publication trends, annual growth patterns, cited references, top authors, leading journals, and leading countries. Patterns of strengths from co-occurrence networks in VOSviewer revealed growing interest in generative AI tools, AI ethics, and concerns about AI integration into the curriculum in HE. The LDA analysis identified two dominant themes: the pedagogical integration of generative AI tools and broader academic discourse on AI ethics that correlated with the VOSviewer findings. This enhanced the credibility, reliability, and validity of the bibliometric techniques applied in the study. Recommendations and future directions offer valuable insights for policymakers and stakeholders to address pedagogical integration of generative AI tools in HE. The development of frameworks and ethical guidelines are important to address fair and transparent adoption of AI in HE. 
Further, global inequalities in adoption, aligning with UNESCO’s Sustainable Development Goals, are crucial to ensure equitable and responsible AI integration in HE.</p>
	]]></content:encoded>

	<dc:title>Mapping the AI Surge in Higher Education: A Bibliometric Study Spanning a Decade (2015–2025)</dc:title>
			<dc:creator>Mousin Omarsaib</dc:creator>
			<dc:creator>Sara Bibi Mitha</dc:creator>
			<dc:creator>Anisa Vahed</dc:creator>
			<dc:creator>Ghulam Masudh Mohamed</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040137</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-08</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-08</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>137</prism:startingPage>
		<prism:doi>10.3390/informatics12040137</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/137</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/136">

	<title>Informatics, Vol. 12, Pages 136: CLFF-NER: A Cross-Lingual Feature Fusion Model for Named Entity Recognition in the Traditional Chinese Festival Culture Domain</title>
	<link>https://www.mdpi.com/2227-9709/12/4/136</link>
	<description>With the rapid development of information technology, there is an increasing demand for the digital preservation of traditional festival culture and the extraction of relevant knowledge. However, existing research on Named Entity Recognition (NER) for Chinese traditional festival culture lacks support from high-quality corpora and dedicated model methods. To address this gap, this study proposes a Named Entity Recognition model, CLFF-NER, which integrates multi-source heterogeneous information. The model operates as follows: first, Multilingual BERT is employed to obtain the contextual semantic representations of Chinese and English sentences. Subsequently, a Multiconvolutional Kernel Network (MKN) is used to extract the local structural features of entities. Then, a Transformer module is introduced to achieve cross-lingual, cross-attention fusion of Chinese and English semantics. Furthermore, a Graph Neural Network (GNN) is utilized to selectively supplement useful English information, thereby alleviating the interference caused by redundant information. Finally, a gating mechanism and Conditional Random Field (CRF) are combined to jointly optimize the recognition results. Experiments were conducted on the public Chinese Festival Culture Dataset (CTFCDataSet), and the model achieved 89.45%, 90.01%, and 89.73% in precision, recall, and F1 score, respectively&amp;amp;mdash;significantly outperforming a range of mainstream baseline models. Meanwhile, the model also demonstrated competitive performance on two other public datasets, Resume and Weibo, which verifies its strong cross-domain generalization ability.</description>
	<pubDate>2025-12-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 136: CLFF-NER: A Cross-Lingual Feature Fusion Model for Named Entity Recognition in the Traditional Chinese Festival Culture Domain</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/136">doi: 10.3390/informatics12040136</a></p>
	<p>Authors:
		Shenghe Yang
		Kun He
		Wei Li
		Yingying He
		</p>
	<p>With the rapid development of information technology, there is an increasing demand for the digital preservation of traditional festival culture and the extraction of relevant knowledge. However, existing research on Named Entity Recognition (NER) for Chinese traditional festival culture lacks support from high-quality corpora and dedicated model methods. To address this gap, this study proposes a Named Entity Recognition model, CLFF-NER, which integrates multi-source heterogeneous information. The model operates as follows: first, Multilingual BERT is employed to obtain the contextual semantic representations of Chinese and English sentences. Subsequently, a Multiconvolutional Kernel Network (MKN) is used to extract the local structural features of entities. Then, a Transformer module is introduced to achieve cross-lingual, cross-attention fusion of Chinese and English semantics. Furthermore, a Graph Neural Network (GNN) is utilized to selectively supplement useful English information, thereby alleviating the interference caused by redundant information. Finally, a gating mechanism and Conditional Random Field (CRF) are combined to jointly optimize the recognition results. Experiments were conducted on the public Chinese Festival Culture Dataset (CTFCDataSet), and the model achieved 89.45%, 90.01%, and 89.73% in precision, recall, and F1 score, respectively&amp;amp;mdash;significantly outperforming a range of mainstream baseline models. Meanwhile, the model also demonstrated competitive performance on two other public datasets, Resume and Weibo, which verifies its strong cross-domain generalization ability.</p>
	]]></content:encoded>

	<dc:title>CLFF-NER: A Cross-Lingual Feature Fusion Model for Named Entity Recognition in the Traditional Chinese Festival Culture Domain</dc:title>
			<dc:creator>Shenghe Yang</dc:creator>
			<dc:creator>Kun He</dc:creator>
			<dc:creator>Wei Li</dc:creator>
			<dc:creator>Yingying He</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040136</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-05</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>136</prism:startingPage>
		<prism:doi>10.3390/informatics12040136</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/136</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/135">

	<title>Informatics, Vol. 12, Pages 135: Enhancing Intuitive Decision-Making and Reliance Through Human&amp;ndash;AI Collaboration: A Review</title>
	<link>https://www.mdpi.com/2227-9709/12/4/135</link>
	<description>As AI decision support systems play a growing role in high-stakes decision making, ensuring effective integration of human intuition with AI recommendations is essential. Despite advances in AI explainability, challenges persist in fostering appropriate reliance. This review explores AI decision support systems that enhance human intuition through the analysis of 84 studies addressing three questions: (1) What design strategies enable AI systems to support humans&amp;rsquo; intuitive capabilities while maintaining decision-making autonomy? (2) How do AI presentation and interaction approaches influence trust calibration and reliance behaviors in human&amp;ndash;AI collaboration? (3) What ethical and practical implications arise from integrating AI decision support systems into high-risk human decision making, particularly regarding trust calibration, skill degradation, and accountability across different domains? Our findings reveal four key design strategies: complementary role architectures that amplify rather than replace human judgment, adaptive user-centered designs tailoring AI support to individual decision-making styles, context-aware task allocation dynamically assigning responsibilities based on situational factors, and autonomous reliance calibration mechanisms empowering users&amp;rsquo; control over AI dependence. We identified that visual presentations, interactive features, and uncertainty communication significantly influence trust calibration, with simple visual highlights proving more effective than complex presentation and interactive methods in preventing over-reliance. However, a concerning performance paradox emerges where human&amp;ndash;AI combinations often underperform the best individual agent while surpassing human-only performance. 
The research demonstrates that successful AI integration in high-risk contexts requires domain-specific calibration, integrated sociotechnical design addressing trust calibration and skill preservation simultaneously, and proactive measures to maintain human agency and competencies essential for safety, accountability, and ethical responsibility.</description>
	<pubDate>2025-12-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 135: Enhancing Intuitive Decision-Making and Reliance Through Human&ndash;AI Collaboration: A Review</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/135">doi: 10.3390/informatics12040135</a></p>
	<p>Authors:
		Gerui Xu
		Shruthi Venkatesha Murthy
		Bochen Jia
		</p>
	<p>As AI decision support systems play a growing role in high-stakes decision making, ensuring effective integration of human intuition with AI recommendations is essential. Despite advances in AI explainability, challenges persist in fostering appropriate reliance. This review explores AI decision support systems that enhance human intuition through the analysis of 84 studies addressing three questions: (1) What design strategies enable AI systems to support humans&rsquo; intuitive capabilities while maintaining decision-making autonomy? (2) How do AI presentation and interaction approaches influence trust calibration and reliance behaviors in human&ndash;AI collaboration? (3) What ethical and practical implications arise from integrating AI decision support systems into high-risk human decision making, particularly regarding trust calibration, skill degradation, and accountability across different domains? Our findings reveal four key design strategies: complementary role architectures that amplify rather than replace human judgment, adaptive user-centered designs tailoring AI support to individual decision-making styles, context-aware task allocation dynamically assigning responsibilities based on situational factors, and autonomous reliance calibration mechanisms empowering users&rsquo; control over AI dependence. We identified that visual presentations, interactive features, and uncertainty communication significantly influence trust calibration, with simple visual highlights proving more effective than complex presentation and interactive methods in preventing over-reliance. However, a concerning performance paradox emerges where human&ndash;AI combinations often underperform the best individual agent while surpassing human-only performance. 
The research demonstrates that successful AI integration in high-risk contexts requires domain-specific calibration, integrated sociotechnical design addressing trust calibration and skill preservation simultaneously, and proactive measures to maintain human agency and competencies essential for safety, accountability, and ethical responsibility.</p>
	]]></content:encoded>

	<dc:title>Enhancing Intuitive Decision-Making and Reliance Through Human&amp;ndash;AI Collaboration: A Review</dc:title>
			<dc:creator>Gerui Xu</dc:creator>
			<dc:creator>Shruthi Venkatesha Murthy</dc:creator>
			<dc:creator>Bochen Jia</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040135</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-05</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-05</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>135</prism:startingPage>
		<prism:doi>10.3390/informatics12040135</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/135</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/134">

	<title>Informatics, Vol. 12, Pages 134: MCD-Temporal: Constructing a New Time-Entropy Enhanced Dynamic Weighted Heterogeneous Ensemble for Cognitive Level Classification</title>
	<link>https://www.mdpi.com/2227-9709/12/4/134</link>
	<description>Accurate classification of cognitive levels in instructional dialogues is essential for personalized education and intelligent teaching systems. However, most existing methods predominantly rely on static textual features and a shallow semantic analysis. They often overlook dynamic temporal interactions and struggle with class imbalance. To address these limitations, this study proposes a novel framework for cognitive-level classification. This framework integrates time entropy-enhanced dynamics with a dynamically weighted, heterogeneous ensemble strategy. Specifically, we reconstruct the original Multi-turn Classroom Dialogue (MCD) dataset by introducing time entropy to quantify teacher&amp;ndash;student speaking balance and semantic richness features based on Term Frequency-Inverse Document Frequency (TF-IDF), resulting in an enhanced MCD-temporal dataset. We then design a Dynamic Weighted Heterogeneous Ensemble (DWHE), which adjusts weights based on the class distribution. Our framework achieves a state-of-the-art macro-F1 score of 0.6236. This study validates the effectiveness of incorporating temporal dynamics and adaptive ensemble learning for robust cognitive level assessment, offering a more powerful tool for educational AI applications.</description>
	<pubDate>2025-12-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 134: MCD-Temporal: Constructing a New Time-Entropy Enhanced Dynamic Weighted Heterogeneous Ensemble for Cognitive Level Classification</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/134">doi: 10.3390/informatics12040134</a></p>
	<p>Authors:
		Yuhan Wu
		Long Zhang
		Bin Li
		Wendong Zhang
		</p>
	<p>Accurate classification of cognitive levels in instructional dialogues is essential for personalized education and intelligent teaching systems. However, most existing methods predominantly rely on static textual features and a shallow semantic analysis. They often overlook dynamic temporal interactions and struggle with class imbalance. To address these limitations, this study proposes a novel framework for cognitive-level classification. This framework integrates time entropy-enhanced dynamics with a dynamically weighted, heterogeneous ensemble strategy. Specifically, we reconstruct the original Multi-turn Classroom Dialogue (MCD) dataset by introducing time entropy to quantify teacher&ndash;student speaking balance and semantic richness features based on Term Frequency-Inverse Document Frequency (TF-IDF), resulting in an enhanced MCD-temporal dataset. We then design a Dynamic Weighted Heterogeneous Ensemble (DWHE), which adjusts weights based on the class distribution. Our framework achieves a state-of-the-art macro-F1 score of 0.6236. This study validates the effectiveness of incorporating temporal dynamics and adaptive ensemble learning for robust cognitive level assessment, offering a more powerful tool for educational AI applications.</p>
	]]></content:encoded>

	<dc:title>MCD-Temporal: Constructing a New Time-Entropy Enhanced Dynamic Weighted Heterogeneous Ensemble for Cognitive Level Classification</dc:title>
			<dc:creator>Yuhan Wu</dc:creator>
			<dc:creator>Long Zhang</dc:creator>
			<dc:creator>Bin Li</dc:creator>
			<dc:creator>Wendong Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040134</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-02</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-02</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>134</prism:startingPage>
		<prism:doi>10.3390/informatics12040134</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/134</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/133">

	<title>Informatics, Vol. 12, Pages 133: Fuzzy Ontology Embeddings and Visual Query Building for Ontology Exploration</title>
	<link>https://www.mdpi.com/2227-9709/12/4/133</link>
	<description>Ontologies play a central role in structuring knowledge across domains, supporting tasks such as reasoning, data integration, and semantic search. However, their large size and complexity&amp;mdash;particularly in fields such as biomedicine, computational biology, law, and engineering&amp;mdash;make them difficult for non-experts to navigate. Formal query languages such as SPARQL offer expressive access but require users to understand the ontology&amp;rsquo;s structure and syntax. In contrast, visual exploration tools and basic keyword-based search interfaces are easier to use but often lack flexibility and expressiveness. We introduce FuzzyVis, a proof-of-concept system that enables intuitive and expressive exploration of complex ontologies. FuzzyVis integrates two key components: a fuzzy logic-based querying model built on fuzzy ontology embeddings, and an interactive visual interface for building and interpreting queries. Users can construct new composite concepts by selecting and combining existing ontology concepts using logical operators such as conjunction, disjunction, and negation. These composite concepts are matched against the ontology using fuzzy membership-based embeddings, which capture degrees of membership and support approximate, concept-level similarity search. The visual interface supports browsing, query composition, and partial search without requiring formal syntax. By combining fuzzy semantics with embedding-based reasoning, FuzzyVis enables flexible interpretation, efficient computation, and exploratory learning. A usage scenario demonstrates how FuzzyVis supports subtle information needs and helps users uncover relevant concepts in large, complex ontologies.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 133: Fuzzy Ontology Embeddings and Visual Query Building for Ontology Exploration</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/133">doi: 10.3390/informatics12040133</a></p>
	<p>Authors:
		Vladimir Zhurov
		John Kausch
		Kamran Sedig
		Mostafa Milani
		</p>
	<p>Ontologies play a central role in structuring knowledge across domains, supporting tasks such as reasoning, data integration, and semantic search. However, their large size and complexity&mdash;particularly in fields such as biomedicine, computational biology, law, and engineering&mdash;make them difficult for non-experts to navigate. Formal query languages such as SPARQL offer expressive access but require users to understand the ontology&rsquo;s structure and syntax. In contrast, visual exploration tools and basic keyword-based search interfaces are easier to use but often lack flexibility and expressiveness. We introduce FuzzyVis, a proof-of-concept system that enables intuitive and expressive exploration of complex ontologies. FuzzyVis integrates two key components: a fuzzy logic-based querying model built on fuzzy ontology embeddings, and an interactive visual interface for building and interpreting queries. Users can construct new composite concepts by selecting and combining existing ontology concepts using logical operators such as conjunction, disjunction, and negation. These composite concepts are matched against the ontology using fuzzy membership-based embeddings, which capture degrees of membership and support approximate, concept-level similarity search. The visual interface supports browsing, query composition, and partial search without requiring formal syntax. By combining fuzzy semantics with embedding-based reasoning, FuzzyVis enables flexible interpretation, efficient computation, and exploratory learning. A usage scenario demonstrates how FuzzyVis supports subtle information needs and helps users uncover relevant concepts in large, complex ontologies.</p>
	]]></content:encoded>

	<dc:title>Fuzzy Ontology Embeddings and Visual Query Building for Ontology Exploration</dc:title>
			<dc:creator>Vladimir Zhurov</dc:creator>
			<dc:creator>John Kausch</dc:creator>
			<dc:creator>Kamran Sedig</dc:creator>
			<dc:creator>Mostafa Milani</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040133</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>133</prism:startingPage>
		<prism:doi>10.3390/informatics12040133</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/133</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/132">

	<title>Informatics, Vol. 12, Pages 132: Reducing AI-Generated Misinformation in Australian Higher Education: A Qualitative Analysis of Institutional Responses to AI-Generated Misinformation and Implications for Cybercrime Prevention</title>
	<link>https://www.mdpi.com/2227-9709/12/4/132</link>
	<description>Generative Artificial Intelligence (GenAI) has transformed Australian higher education, amplifying online harms such as misinformation, fraud, and image-based abuse, with significant implications for cybercrime prevention. Combining a PRISMA-guided systematic review with MAXQDA-driven analysis of Australian university policies, this research evaluates institutional strategies against national frameworks, such as the Cybersecurity Strategy 2023&amp;ndash;2030. Analyzing data from academic literature, we identify three key themes: educational strategies, alignment with national frameworks, and policy gaps and development. As the first qualitative analysis of 40 Australian university policies, this study uncovers systemic fragmentation in governance frameworks, with only 12 institutions addressing data privacy risks and none directly targeting AI-driven disinformation threats like deepfake harassment&amp;mdash;a critical gap in global AI governance literature. This study provides actionable recommendations to develop the National GenAI Governance Framework, co-developed by TEQSA/UA and DoE, enhanced cyberbullying policies, and behavior-focused training to enhance digital safety and prevent cybercrime in Australian higher education. Mandatory annual CyberAI Literacy Module for all students and staff to ensure awareness of cybersecurity risks, responsible use of artificial intelligence, and digital safety practices within the university community.</description>
	<pubDate>2025-11-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 132: Reducing AI-Generated Misinformation in Australian Higher Education: A Qualitative Analysis of Institutional Responses to AI-Generated Misinformation and Implications for Cybercrime Prevention</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/132">doi: 10.3390/informatics12040132</a></p>
	<p>Authors:
		Leo S. F. Lin
		Geberew Tulu Mekonnen
		Mladen Zecevic
		Immaculate Motsi-Omoijiade
		Duane Aslett
		Douglas M. C. Allan
		</p>
	<p>Generative Artificial Intelligence (GenAI) has transformed Australian higher education, amplifying online harms such as misinformation, fraud, and image-based abuse, with significant implications for cybercrime prevention. Combining a PRISMA-guided systematic review with MAXQDA-driven analysis of Australian university policies, this research evaluates institutional strategies against national frameworks, such as the Cybersecurity Strategy 2023&ndash;2030. Analyzing data from academic literature, we identify three key themes: educational strategies, alignment with national frameworks, and policy gaps and development. As the first qualitative analysis of 40 Australian university policies, this study uncovers systemic fragmentation in governance frameworks, with only 12 institutions addressing data privacy risks and none directly targeting AI-driven disinformation threats like deepfake harassment&mdash;a critical gap in global AI governance literature. This study provides actionable recommendations to develop the National GenAI Governance Framework, co-developed by TEQSA/UA and DoE, enhanced cyberbullying policies, and behavior-focused training to enhance digital safety and prevent cybercrime in Australian higher education. Mandatory annual CyberAI Literacy Module for all students and staff to ensure awareness of cybersecurity risks, responsible use of artificial intelligence, and digital safety practices within the university community.</p>
	]]></content:encoded>

	<dc:title>Reducing AI-Generated Misinformation in Australian Higher Education: A Qualitative Analysis of Institutional Responses to AI-Generated Misinformation and Implications for Cybercrime Prevention</dc:title>
			<dc:creator>Leo S. F. Lin</dc:creator>
			<dc:creator>Geberew Tulu Mekonnen</dc:creator>
			<dc:creator>Mladen Zecevic</dc:creator>
			<dc:creator>Immaculate Motsi-Omoijiade</dc:creator>
			<dc:creator>Duane Aslett</dc:creator>
			<dc:creator>Douglas M. C. Allan</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040132</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-28</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>132</prism:startingPage>
		<prism:doi>10.3390/informatics12040132</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/132</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/131">

	<title>Informatics, Vol. 12, Pages 131: Hierarchical Fake News Detection Model Based on Multi-Task Learning and Adversarial Training</title>
	<link>https://www.mdpi.com/2227-9709/12/4/131</link>
	<description>The harmfulness of online fake news has brought widespread attention to fake news detection by researchers. Most existing methods focus on improving the accuracy and early detection of fake news, while ignoring the frequent cross-topic issues faced by fake news in online environments. A hierarchical fake news detection method (HAMFD) based on multi-task learning and adversarial training is proposed. Through the multi-task learning task at the event level, subjective and objective information is introduced. A subjectivity classifier is used to capture sentiment shift within events, aiming to improve in-domain performance and generalization ability of fake news detection. On this basis, textual features and sentiment shift features are fused to perform event-level fake news detection and enhance detection accuracy. The post-level loss and event-level loss are weighted and summed for backpropagation. Adversarial perturbations are added to the embedding layer of the post-level module to deceive the detector, enabling the model to better resist adversarial attacks and enhance its robustness and topic adaptability. Experiments are conducted on three real-world social media datasets, and the results show that the proposed method improves performance in both in-domain and cross-topic fake news detection. Specifically, the model attains accuracies of 91.3% on Twitter15, 90.4% on Twitter16, and 95.7% on Weibo, surpassing advanced baseline methods by 1.6%, 1.5%, and 1.1%, respectively.</description>
	<pubDate>2025-11-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 131: Hierarchical Fake News Detection Model Based on Multi-Task Learning and Adversarial Training</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/131">doi: 10.3390/informatics12040131</a></p>
	<p>Authors:
		Yi Sun
		Dunhui Yu
		</p>
	<p>The harmfulness of online fake news has brought widespread attention to fake news detection by researchers. Most existing methods focus on improving the accuracy and early detection of fake news, while ignoring the frequent cross-topic issues faced by fake news in online environments. A hierarchical fake news detection method (HAMFD) based on multi-task learning and adversarial training is proposed. Through the multi-task learning task at the event level, subjective and objective information is introduced. A subjectivity classifier is used to capture sentiment shift within events, aiming to improve in-domain performance and generalization ability of fake news detection. On this basis, textual features and sentiment shift features are fused to perform event-level fake news detection and enhance detection accuracy. The post-level loss and event-level loss are weighted and summed for backpropagation. Adversarial perturbations are added to the embedding layer of the post-level module to deceive the detector, enabling the model to better resist adversarial attacks and enhance its robustness and topic adaptability. Experiments are conducted on three real-world social media datasets, and the results show that the proposed method improves performance in both in-domain and cross-topic fake news detection. Specifically, the model attains accuracies of 91.3% on Twitter15, 90.4% on Twitter16, and 95.7% on Weibo, surpassing advanced baseline methods by 1.6%, 1.5%, and 1.1%, respectively.</p>
	]]></content:encoded>

	<dc:title>Hierarchical Fake News Detection Model Based on Multi-Task Learning and Adversarial Training</dc:title>
			<dc:creator>Yi Sun</dc:creator>
			<dc:creator>Dunhui Yu</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040131</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-27</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-27</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>131</prism:startingPage>
		<prism:doi>10.3390/informatics12040131</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/131</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/130">

	<title>Informatics, Vol. 12, Pages 130: Explainable Artificial Intelligence for Workplace Mental Health Prediction</title>
	<link>https://www.mdpi.com/2227-9709/12/4/130</link>
	<description>The increased prevalence of mental health issues in the workplace affects employees&amp;rsquo; well-being and organisational success, necessitating proactive interventions such as employee assistance programmes, stress management workshops, and tailored wellness initiatives. Artificial intelligence (AI) techniques are transforming mental health risk prediction using behavioural, environmental, and workplace data. However, the &amp;ldquo;black-box&amp;rdquo; nature of many AI models hinders trust, transparency, and adoption in sensitive domains such as mental health. This study used the Open Sourcing Mental Illness (OSMI) secondary dataset (2016&amp;ndash;2023) and applied four ML classifiers, Random Forest (RF), xGBoost, Support Vector Machine (SVM), and AdaBoost, to predict workplace mental health outcomes. Explainable AI (XAI) techniques, SHapley Additive exPlanations (SHAP) and Local Interpretable Model-agnostic Explanations (LIME), were integrated to provide both global (SHAP) and instance-level (LIME) interpretability. The Synthetic Minority Oversampling Technique (SMOTE) was applied to address class imbalance. The results show that xGBoost and RF achieved the highest cross-validation accuracy (94%), with xGBoost performing best overall (accuracy = 91%, ROC AUC = 90%), followed by RF (accuracy = 91%). SHAP revealed that sought_treatment, past_mh_disorder, and current_mh_disorder had the most significant positive impact on predictions, while LIME provided case-level explanations to support individualised interpretation. These findings show the importance of explainable ML models in informing timely, targeted interventions, such as improving access to mental health resources, promoting stigma-free workplaces, and supporting treatment-seeking behaviour, while ensuring the ethical and transparent integration of AI into workplace mental health management.</description>
	<pubDate>2025-11-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 130: Explainable Artificial Intelligence for Workplace Mental Health Prediction</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/130">doi: 10.3390/informatics12040130</a></p>
	<p>Authors:
		Tsholofelo Mokheleli
		Tebogo Bokaba
		Elliot Mbunge
		</p>
	<p>The increased prevalence of mental health issues in the workplace affects employees&rsquo; well-being and organisational success, necessitating proactive interventions such as employee assistance programmes, stress management workshops, and tailored wellness initiatives. Artificial intelligence (AI) techniques are transforming mental health risk prediction using behavioural, environmental, and workplace data. However, the &ldquo;black-box&rdquo; nature of many AI models hinders trust, transparency, and adoption in sensitive domains such as mental health. This study used the Open Sourcing Mental Illness (OSMI) secondary dataset (2016&ndash;2023) and applied four ML classifiers, Random Forest (RF), xGBoost, Support Vector Machine (SVM), and AdaBoost, to predict workplace mental health outcomes. Explainable AI (XAI) techniques, SHapley Additive exPlanations (SHAP) and Local Interpretable Model-agnostic Explanations (LIME), were integrated to provide both global (SHAP) and instance-level (LIME) interpretability. The Synthetic Minority Oversampling Technique (SMOTE) was applied to address class imbalance. The results show that xGBoost and RF achieved the highest cross-validation accuracy (94%), with xGBoost performing best overall (accuracy = 91%, ROC AUC = 90%), followed by RF (accuracy = 91%). SHAP revealed that sought_treatment, past_mh_disorder, and current_mh_disorder had the most significant positive impact on predictions, while LIME provided case-level explanations to support individualised interpretation. These findings show the importance of explainable ML models in informing timely, targeted interventions, such as improving access to mental health resources, promoting stigma-free workplaces, and supporting treatment-seeking behaviour, while ensuring the ethical and transparent integration of AI into workplace mental health management.</p>
	]]></content:encoded>

	<dc:title>Explainable Artificial Intelligence for Workplace Mental Health Prediction</dc:title>
			<dc:creator>Tsholofelo Mokheleli</dc:creator>
			<dc:creator>Tebogo Bokaba</dc:creator>
			<dc:creator>Elliot Mbunge</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040130</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-26</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-26</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>130</prism:startingPage>
		<prism:doi>10.3390/informatics12040130</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/130</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/129">

	<title>Informatics, Vol. 12, Pages 129: ETICD-Net: A Multimodal Fake News Detection Network via Emotion-Topic Injection and Consistency Modeling</title>
	<link>https://www.mdpi.com/2227-9709/12/4/129</link>
	<description>The widespread dissemination of multimodal disinformation, which combines inflammatory text with manipulated images, poses a severe threat to society. Existing detection methods typically process textual and visual features in isolation or perform simple fusion, failing to capture the sophisticated semantic inconsistencies commonly found in false information. To address this, we propose a novel framework: Emotion-Topic Injection and Consistency Detection Network (ETICD-Net). First, a large language model (LLM) extracts structured sentiment and topic-guided signals from news texts to provide rich semantic clues. Second, unlike previous approaches, this guided signal is injected into the feature extraction processes of both modalities: it enhances text features from BERT and modulates image features from ResNet, thereby generating sentiment-topic-aware feature representations. Additionally, this paper introduces a hierarchical consistency fusion module that explicitly evaluates semantic coherence among these enhanced features. It employs cross-modal attention mechanisms, enabling text to query image regions relevant to its statements, and calculates explicit dissimilarity metrics to quantify inconsistencies. Extensive experiments on the Weibo and Twitter benchmark datasets demonstrate that ETICD-Net outperforms or matches state-of-the-art methods, achieving accuracy and F1 scores of 90.6% and 91.5%, respectively.</description>
	<pubDate>2025-11-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 129: ETICD-Net: A Multimodal Fake News Detection Network via Emotion-Topic Injection and Consistency Modeling</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/129">doi: 10.3390/informatics12040129</a></p>
	<p>Authors:
		Wenqian Shang
		Jinru Yang
		Linlin Zhang
		Tong Yi
		Peng Liu
		</p>
	<p>The widespread dissemination of multimodal disinformation, which combines inflammatory text with manipulated images, poses a severe threat to society. Existing detection methods typically process textual and visual features in isolation or perform simple fusion, failing to capture the sophisticated semantic inconsistencies commonly found in false information. To address this, we propose a novel framework: Emotion-Topic Injection and Consistency Detection Network (ETICD-Net). First, a large language model (LLM) extracts structured sentiment and topic-guided signals from news texts to provide rich semantic clues. Second, unlike previous approaches, this guided signal is injected into the feature extraction processes of both modalities: it enhances text features from BERT and modulates image features from ResNet, thereby generating sentiment-topic-aware feature representations. Additionally, this paper introduces a hierarchical consistency fusion module that explicitly evaluates semantic coherence among these enhanced features. It employs cross-modal attention mechanisms, enabling text to query image regions relevant to its statements, and calculates explicit dissimilarity metrics to quantify inconsistencies. Extensive experiments on the Weibo and Twitter benchmark datasets demonstrate that ETICD-Net outperforms or matches state-of-the-art methods, achieving accuracy and F1 scores of 90.6% and 91.5%, respectively.</p>
	]]></content:encoded>

	<dc:title>ETICD-Net: A Multimodal Fake News Detection Network via Emotion-Topic Injection and Consistency Modeling</dc:title>
			<dc:creator>Wenqian Shang</dc:creator>
			<dc:creator>Jinru Yang</dc:creator>
			<dc:creator>Linlin Zhang</dc:creator>
			<dc:creator>Tong Yi</dc:creator>
			<dc:creator>Peng Liu</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040129</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-25</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-25</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>129</prism:startingPage>
		<prism:doi>10.3390/informatics12040129</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/129</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/128">

	<title>Informatics, Vol. 12, Pages 128: Learning Dynamics Analysis: Assessing Generalization of Machine Learning Models for Optical Coherence Tomography Multiclass Classification</title>
	<link>https://www.mdpi.com/2227-9709/12/4/128</link>
	<description>This study evaluated the generalization and reliability of machine learning models for multiclass classification of retinal pathologies using a diverse set of images representing eight disease categories. Images were aggregated from two public datasets and divided into training, validation, and test sets, with an additional independent dataset used for external validation. Multiple modeling approaches were compared, including classical machine learning algorithms, convolutional neural networks with and without data augmentation, and a deep neural network using pre-trained feature extraction. Analysis of learning dynamics revealed that classical models and unaugmented convolutional neural networks exhibited overfitting and poor generalization, while models with data augmentation and the deep neural network showed healthy, parallel convergence of training and validation performance. Only the deep neural network demonstrated a consistent, monotonic decrease in accuracy, F1-score, and recall from training through external validation, indicating robust generalization. These results underscore the necessity of evaluating learning dynamics (not just summary metrics) to ensure model reliability and patient safety. Typically, model performance is expected to decrease gradually as data becomes less familiar. Therefore, models that do not exhibit these healthy learning dynamics, or that show unexpected improvements in performance on subsequent datasets, should not be considered for clinical application, as such patterns may indicate methodological flaws or data leakage rather than true generalization.</description>
	<pubDate>2025-11-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 128: Learning Dynamics Analysis: Assessing Generalization of Machine Learning Models for Optical Coherence Tomography Multiclass Classification</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/128">doi: 10.3390/informatics12040128</a></p>
	<p>Authors:
		Michael Sher
		David Remyes
		Riah Sharma
		Milan Toma
		</p>
	<p>This study evaluated the generalization and reliability of machine learning models for multiclass classification of retinal pathologies using a diverse set of images representing eight disease categories. Images were aggregated from two public datasets and divided into training, validation, and test sets, with an additional independent dataset used for external validation. Multiple modeling approaches were compared, including classical machine learning algorithms, convolutional neural networks with and without data augmentation, and a deep neural network using pre-trained feature extraction. Analysis of learning dynamics revealed that classical models and unaugmented convolutional neural networks exhibited overfitting and poor generalization, while models with data augmentation and the deep neural network showed healthy, parallel convergence of training and validation performance. Only the deep neural network demonstrated a consistent, monotonic decrease in accuracy, F1-score, and recall from training through external validation, indicating robust generalization. These results underscore the necessity of evaluating learning dynamics (not just summary metrics) to ensure model reliability and patient safety. Typically, model performance is expected to decrease gradually as data becomes less familiar. Therefore, models that do not exhibit these healthy learning dynamics, or that show unexpected improvements in performance on subsequent datasets, should not be considered for clinical application, as such patterns may indicate methodological flaws or data leakage rather than true generalization.</p>
	]]></content:encoded>

	<dc:title>Learning Dynamics Analysis: Assessing Generalization of Machine Learning Models for Optical Coherence Tomography Multiclass Classification</dc:title>
			<dc:creator>Michael Sher</dc:creator>
			<dc:creator>David Remyes</dc:creator>
			<dc:creator>Riah Sharma</dc:creator>
			<dc:creator>Milan Toma</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040128</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-22</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-22</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>128</prism:startingPage>
		<prism:doi>10.3390/informatics12040128</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/128</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/127">

	<title>Informatics, Vol. 12, Pages 127: MOOC Dropout Prediction via a Dilated Convolutional Attention Network with Lie Group Features</title>
	<link>https://www.mdpi.com/2227-9709/12/4/127</link>
	<description>Massive open online courses (MOOCs) represent an innovative online learning paradigm that has garnered considerable popularity in recent years, attracting a multitude of learners to MOOC platforms due to their accessible and adaptable instructional structure. However, the elevated dropout rate in current MOOCs limits their advancement. Current dropout prediction models predominantly employ fixed-size convolutional kernels for feature extraction, which insufficiently address temporal dependencies and consequently demonstrate specific limitations. We propose a Lie Group-based feature context-local fusion attention model for predicting dropout in MOOCs. This model initially extracts shallow features using Lie Group machine learning techniques and subsequently integrates multiple parallel dilated convolutional modules to acquire high-level semantic representations. We design an attention mechanism that integrates contextual and local features, effectively capturing the temporal dependencies in the study behaviors of learners. We performed multiple experiments on the XuetangX dataset to evaluate the model&amp;rsquo;s efficacy. The results show that our method attains a precision score of 0.910, exceeding the previous state-of-the-art approach by 3.3%.</description>
	<pubDate>2025-11-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 127: MOOC Dropout Prediction via a Dilated Convolutional Attention Network with Lie Group Features</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/127">doi: 10.3390/informatics12040127</a></p>
	<p>Authors:
		Yinxu Liu
		Chengjun Xu
		Desheng Yang
		Yuncheng Shen
		</p>
	<p>Massive open online courses (MOOCs) represent an innovative online learning paradigm that has garnered considerable popularity in recent years, attracting a multitude of learners to MOOC platforms due to their accessible and adaptable instructional structure. However, the elevated dropout rate in current MOOCs limits their advancement. Current dropout prediction models predominantly employ fixed-size convolutional kernels for feature extraction, which insufficiently address temporal dependencies and consequently demonstrate specific limitations. We propose a Lie Group-based feature context-local fusion attention model for predicting dropout in MOOCs. This model initially extracts shallow features using Lie Group machine learning techniques and subsequently integrates multiple parallel dilated convolutional modules to acquire high-level semantic representations. We design an attention mechanism that integrates contextual and local features, effectively capturing the temporal dependencies in the study behaviors of learners. We performed multiple experiments on the XuetangX dataset to evaluate the model&rsquo;s efficacy. The results show that our method attains a precision score of 0.910, exceeding the previous state-of-the-art approach by 3.3%.</p>
	]]></content:encoded>

	<dc:title>MOOC Dropout Prediction via a Dilated Convolutional Attention Network with Lie Group Features</dc:title>
			<dc:creator>Yinxu Liu</dc:creator>
			<dc:creator>Chengjun Xu</dc:creator>
			<dc:creator>Desheng Yang</dc:creator>
			<dc:creator>Yuncheng Shen</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040127</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-21</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-21</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>127</prism:startingPage>
		<prism:doi>10.3390/informatics12040127</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/127</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/126">

	<title>Informatics, Vol. 12, Pages 126: Automated Hyperparameter Optimization for Cyberattack Detection Based on Machine Learning in IoT Systems</title>
	<link>https://www.mdpi.com/2227-9709/12/4/126</link>
	<description>The growing sophistication of cyberattacks in Internet of Things (IoT) environments demands proactive and efficient solutions. We present an automated hyperparameter optimization (HPO) method for detecting cyberattacks in IoT that explicitly addresses class imbalance. The approach combines a Random Forest surrogate, a UCB acquisition function with controlled exploration, and an objective function that maximizes weighted F1 and MCC; it also integrates stratified validation and a compact selection of descriptors by metaheuristic consensus. Five models (RandomForest, AdaBoost, DecisionTree, XGBoost, and MLP) were evaluated on CICIoT2023 and CIC-DDoS2019. The results show systematic improvements over default configurations and competitiveness compared to Hyperopt and GridSearch. For RandomForest, marked increases were observed in CIC-DDoS2019 (F1-Score from 0.9469 to 0.9995; MCC from 0.9284 to 0.9986) and consistent improvements in CICIoT2023 (F1-Score from 0.9947 to 0.9954; MCC from 0.9885 to 0.9896), while maintaining low inference times. These results demonstrate that the proposed HPO offers a solid balance between performance, computational cost, and traceability, and constitutes a reproducible alternative for strengthening cybersecurity mechanisms in IoT environments with limited resources.</description>
	<pubDate>2025-11-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 126: Automated Hyperparameter Optimization for Cyberattack Detection Based on Machine Learning in IoT Systems</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/126">doi: 10.3390/informatics12040126</a></p>
	<p>Authors:
		Fray L. Becerra-Suarez
		Lloy Pinedo
		Madeleine J. Gavilán-Colca
		Mónica Díaz
		Manuel G. Forero
		</p>
	<p>The growing sophistication of cyberattacks in Internet of Things (IoT) environments demands proactive and efficient solutions. We present an automated hyperparameter optimization (HPO) method for detecting cyberattacks in IoT that explicitly addresses class imbalance. The approach combines a Random Forest surrogate, a UCB acquisition function with controlled exploration, and an objective function that maximizes weighted F1 and MCC; it also integrates stratified validation and a compact selection of descriptors by metaheuristic consensus. Five models (RandomForest, AdaBoost, DecisionTree, XGBoost, and MLP) were evaluated on CICIoT2023 and CIC-DDoS2019. The results show systematic improvements over default configurations and competitiveness compared to Hyperopt and GridSearch. For RandomForest, marked increases were observed in CIC-DDoS2019 (F1-Score from 0.9469 to 0.9995; MCC from 0.9284 to 0.9986) and consistent improvements in CICIoT2023 (F1-Score from 0.9947 to 0.9954; MCC from 0.9885 to 0.9896), while maintaining low inference times. These results demonstrate that the proposed HPO offers a solid balance between performance, computational cost, and traceability, and constitutes a reproducible alternative for strengthening cybersecurity mechanisms in IoT environments with limited resources.</p>
	]]></content:encoded>

	<dc:title>Automated Hyperparameter Optimization for Cyberattack Detection Based on Machine Learning in IoT Systems</dc:title>
			<dc:creator>Fray L. Becerra-Suarez</dc:creator>
			<dc:creator>Lloy Pinedo</dc:creator>
			<dc:creator>Madeleine J. Gavilán-Colca</dc:creator>
			<dc:creator>Mónica Díaz</dc:creator>
			<dc:creator>Manuel G. Forero</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040126</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-20</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-20</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>126</prism:startingPage>
		<prism:doi>10.3390/informatics12040126</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/126</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/125">

	<title>Informatics, Vol. 12, Pages 125: An Adaptive Protocol Selection Framework for Energy-Efficient IoT Communication: Dynamic Optimization Through Context-Aware Decision Making</title>
	<link>https://www.mdpi.com/2227-9709/12/4/125</link>
	<description>The rapid growth of Internet of Things (IoT) deployments has created an urgent need for energy-efficient communication strategies that can adapt to dynamic operational conditions. This study presents a novel adaptive protocol selection framework that dynamically optimizes IoT communication energy consumption through context-aware decision making, achieving up to 34% energy reduction compared to static protocol selection. The framework is grounded in a comprehensive empirical evaluation of three widely used IoT communication protocols&amp;mdash;MQTT, CoAP, and HTTP&amp;mdash;using Intel&amp;rsquo;s Running Average Power Limit (RAPL) for precise energy measurement across varied network conditions including packet loss (0&amp;ndash;20%) and latency variations (1&amp;ndash;200 ms). Our key contribution is the design and validation of an adaptive selection mechanism that employs multi-criteria decision making with hysteresis control to prevent oscillation, dynamically switching between protocols based on six runtime metrics: message frequency, payload size, network conditions, packet loss rate, available energy budget, and QoS requirements. Results show MQTT consumes only 40% of HTTP&amp;rsquo;s energy per byte at high volumes (&amp;gt;10,000 messages), while HTTP remains practical for low-volume traffic (&amp;lt;10 msg/min). A novel finding reveals receiver nodes consistently consume 15&amp;ndash;20% more energy than senders, requiring new design considerations for IoT gateways. The framework demonstrates robust performance across simulated real-world conditions, maintaining 92% of optimal performance while requiring 85% less computation than machine learning approaches. These findings offer actionable guidance for IoT architects and developers, positioning this work as a practical solution for energy-aware IoT communication in production environments.</description>
	<pubDate>2025-11-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 125: An Adaptive Protocol Selection Framework for Energy-Efficient IoT Communication: Dynamic Optimization Through Context-Aware Decision Making</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/125">doi: 10.3390/informatics12040125</a></p>
	<p>Authors:
		Dmitrij Żatuchin
		Maksim Azarskov
		</p>
	<p>The rapid growth of Internet of Things (IoT) deployments has created an urgent need for energy-efficient communication strategies that can adapt to dynamic operational conditions. This study presents a novel adaptive protocol selection framework that dynamically optimizes IoT communication energy consumption through context-aware decision making, achieving up to 34% energy reduction compared to static protocol selection. The framework is grounded in a comprehensive empirical evaluation of three widely used IoT communication protocols&mdash;MQTT, CoAP, and HTTP&mdash;using Intel&rsquo;s Running Average Power Limit (RAPL) for precise energy measurement across varied network conditions including packet loss (0&ndash;20%) and latency variations (1&ndash;200 ms). Our key contribution is the design and validation of an adaptive selection mechanism that employs multi-criteria decision making with hysteresis control to prevent oscillation, dynamically switching between protocols based on six runtime metrics: message frequency, payload size, network conditions, packet loss rate, available energy budget, and QoS requirements. Results show MQTT consumes only 40% of HTTP&rsquo;s energy per byte at high volumes (&gt;10,000 messages), while HTTP remains practical for low-volume traffic (&lt;10 msg/min). A novel finding reveals receiver nodes consistently consume 15&ndash;20% more energy than senders, requiring new design considerations for IoT gateways. The framework demonstrates robust performance across simulated real-world conditions, maintaining 92% of optimal performance while requiring 85% less computation than machine learning approaches. These findings offer actionable guidance for IoT architects and developers, positioning this work as a practical solution for energy-aware IoT communication in production environments.</p>
	]]></content:encoded>

	<dc:title>An Adaptive Protocol Selection Framework for Energy-Efficient IoT Communication: Dynamic Optimization Through Context-Aware Decision Making</dc:title>
			<dc:creator>Dmitrij Żatuchin</dc:creator>
			<dc:creator>Maksim Azarskov</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040125</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-17</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-17</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>125</prism:startingPage>
		<prism:doi>10.3390/informatics12040125</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/125</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/124">

	<title>Informatics, Vol. 12, Pages 124: Leveraging the Graph-Based LLM to Support the Analysis of Supply Chain Information</title>
	<link>https://www.mdpi.com/2227-9709/12/4/124</link>
	<description>Modern companies often rely on integrating an extensive network of suppliers to organize and produce industrial artifacts. Within this process, it is critical to maintain sustainability and flexibility by analyzing and managing information from the supply chain. In particular, there is a continuous demand to automatically analyze and infer information from extensive datasets structured in various forms, such as natural language and domain-specific models. The advancement of Large Language Models (LLM) presents a promising solution to address this challenge. By leveraging prompts that contain the necessary information provided by humans, LLM can generate insightful responses through analysis and reasoning over the provided content. However, the quality of these responses is still affected by the inherent opaqueness of LLM, stemming from their complex architectures, thus weakening their trustworthiness and limiting their applicability across different fields. To address this issue, this work presents a framework to leverage the graph-based LLM to support the analysis of supply chain information by combining the LLM and domain knowledge. Specifically, this work proposes an integration of LLM and domain knowledge to support an analysis of the supply chain as follows: (1) constructing a graph-based knowledge base to describe and model the domain knowledge; (2) creating prompts to support the retrieval of the graph-based models and guide the generation of LLM; (3) generating responses via LLM to support the analysis and reason about information across the supply chain. We demonstrate the proposed framework in the tasks of entity classification, link prediction, and reasoning across entities. Compared to the average performance of the best methods in the comparative studies, the proposed framework achieves a significant improvement of 59%, increasing the ROUGE-1 F1 score from 0.42 to 0.67.</description>
	<pubDate>2025-11-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 124: Leveraging the Graph-Based LLM to Support the Analysis of Supply Chain Information</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/124">doi: 10.3390/informatics12040124</a></p>
	<p>Authors:
		Peng Su
		Rui Xu
		Dejiu Chen
		</p>
	<p>Modern companies often rely on integrating an extensive network of suppliers to organize and produce industrial artifacts. Within this process, it is critical to maintain sustainability and flexibility by analyzing and managing information from the supply chain. In particular, there is a continuous demand to automatically analyze and infer information from extensive datasets structured in various forms, such as natural language and domain-specific models. The advancement of Large Language Models (LLM) presents a promising solution to address this challenge. By leveraging prompts that contain the necessary information provided by humans, LLM can generate insightful responses through analysis and reasoning over the provided content. However, the quality of these responses is still affected by the inherent opaqueness of LLM, stemming from their complex architectures, thus weakening their trustworthiness and limiting their applicability across different fields. To address this issue, this work presents a framework to leverage the graph-based LLM to support the analysis of supply chain information by combining the LLM and domain knowledge. Specifically, this work proposes an integration of LLM and domain knowledge to support an analysis of the supply chain as follows: (1) constructing a graph-based knowledge base to describe and model the domain knowledge; (2) creating prompts to support the retrieval of the graph-based models and guide the generation of LLM; (3) generating responses via LLM to support the analysis and reason about information across the supply chain. We demonstrate the proposed framework in the tasks of entity classification, link prediction, and reasoning across entities. Compared to the average performance of the best methods in the comparative studies, the proposed framework achieves a significant improvement of 59%, increasing the ROUGE-1 F1 score from 0.42 to 0.67.</p>
	]]></content:encoded>

	<dc:title>Leveraging the Graph-Based LLM to Support the Analysis of Supply Chain Information</dc:title>
			<dc:creator>Peng Su</dc:creator>
			<dc:creator>Rui Xu</dc:creator>
			<dc:creator>Dejiu Chen</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040124</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-13</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-13</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>124</prism:startingPage>
		<prism:doi>10.3390/informatics12040124</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/124</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/123">

	<title>Informatics, Vol. 12, Pages 123: GraderAssist: A Graph-Based Multi-LLM Framework for Transparent and Reproducible Automated Evaluation</title>
	<link>https://www.mdpi.com/2227-9709/12/4/123</link>
	<description>Background and objectives: Automated evaluation of open-ended responses remains a persistent challenge, particularly when consistency, transparency, and reproducibility are required. While large language models (LLMs) have shown promise in rubric-based evaluation, their reliability across multiple evaluators is still uncertain. Variability in scoring, feedback, and rubric adherence raises concerns about interpretability and system robustness. This study introduces GraderAssist, a graph-based, rubric-guided, multi-LLM framework designed to ensure transparent and reproducible automated evaluation. Methods: GraderAssist evaluates a dataset of 220 responses to both technical and argumentative questions, collected from undergraduate computer science courses. Six open-source LLMs and GPT-4 (as expert reference) independently scored each response using two predefined rubrics. All outputs&amp;mdash;including scores, feedback, and metadata&amp;mdash;were parsed, validated, and stored in a Neo4j graph database, enabling structured querying, traceability, and longitudinal analysis. Results: Cross-model analysis revealed systematic differences in scoring behavior and feedback generation. Some models produced more generous evaluations, while others aligned closely with GPT-4. Semantic analysis using Sentence-BERT embeddings highlighted distinctive feedback styles and variable rubric adherence. Inter-model agreement was stronger for technical criteria but diverged substantially for argumentative tasks. Originality: GraderAssist integrates rubric-guided evaluation, multi-model comparison, and graph-based storage into a unified pipeline. By emphasizing reproducibility, transparency, and fine-grained analysis of evaluator behavior, it advances the design of interpretable automated evaluation systems with applications in education and beyond.</description>
	<pubDate>2025-11-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 123: GraderAssist: A Graph-Based Multi-LLM Framework for Transparent and Reproducible Automated Evaluation</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/123">doi: 10.3390/informatics12040123</a></p>
	<p>Authors:
		Catalin Anghel
		Andreea Alexandra Anghel
		Emilia Pecheanu
		Adina Cocu
		Marian Viorel Craciun
		Paul Iacobescu
		Antonio Stefan Balau
		Constantin Adrian Andrei
		</p>
	<p>Background and objectives: Automated evaluation of open-ended responses remains a persistent challenge, particularly when consistency, transparency, and reproducibility are required. While large language models (LLMs) have shown promise in rubric-based evaluation, their reliability across multiple evaluators is still uncertain. Variability in scoring, feedback, and rubric adherence raises concerns about interpretability and system robustness. This study introduces GraderAssist, a graph-based, rubric-guided, multi-LLM framework designed to ensure transparent and reproducible automated evaluation. Methods: GraderAssist evaluates a dataset of 220 responses to both technical and argumentative questions, collected from undergraduate computer science courses. Six open-source LLMs and GPT-4 (as expert reference) independently scored each response using two predefined rubrics. All outputs&mdash;including scores, feedback, and metadata&mdash;were parsed, validated, and stored in a Neo4j graph database, enabling structured querying, traceability, and longitudinal analysis. Results: Cross-model analysis revealed systematic differences in scoring behavior and feedback generation. Some models produced more generous evaluations, while others aligned closely with GPT-4. Semantic analysis using Sentence-BERT embeddings highlighted distinctive feedback styles and variable rubric adherence. Inter-model agreement was stronger for technical criteria but diverged substantially for argumentative tasks. Originality: GraderAssist integrates rubric-guided evaluation, multi-model comparison, and graph-based storage into a unified pipeline. By emphasizing reproducibility, transparency, and fine-grained analysis of evaluator behavior, it advances the design of interpretable automated evaluation systems with applications in education and beyond.</p>
	]]></content:encoded>

	<dc:title>GraderAssist: A Graph-Based Multi-LLM Framework for Transparent and Reproducible Automated Evaluation</dc:title>
			<dc:creator>Catalin Anghel</dc:creator>
			<dc:creator>Andreea Alexandra Anghel</dc:creator>
			<dc:creator>Emilia Pecheanu</dc:creator>
			<dc:creator>Adina Cocu</dc:creator>
			<dc:creator>Marian Viorel Craciun</dc:creator>
			<dc:creator>Paul Iacobescu</dc:creator>
			<dc:creator>Antonio Stefan Balau</dc:creator>
			<dc:creator>Constantin Adrian Andrei</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040123</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-09</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-09</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>123</prism:startingPage>
		<prism:doi>10.3390/informatics12040123</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/123</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/122">

	<title>Informatics, Vol. 12, Pages 122: Percolation&amp;ndash;Stochastic Model for Traffic Management in Transport Networks</title>
	<link>https://www.mdpi.com/2227-9709/12/4/122</link>
	<description>This article describes a model for optimizing traffic flow control and generating traffic signal phases based on the stochastic dynamics of traffic and the percolation properties of transport networks. As input data (in SUMO), we use lane-level vehicle flow rates, treating them as random processes with unknown distributions. It is shown that the percolation threshold of the transport network can serve as a reliability criterion in a stochastic model of lane blockage and can be used to determine the control interval. To calculate the durations of permissive control signals and their sequence for different directions, vehicle queues are considered and the time required for them to reach the network&amp;rsquo;s percolation threshold is estimated. Subsequently, the lane with the largest queue (i.e., the shortest time to reach blockage) is selected, and a phase is formed for its signal control, as well as for other lanes that can be opened simultaneously. Simulation results show that when dynamic traffic signal control is used and a percolation-dynamic model for balancing road traffic is applied, lane occupancy indicators such as &amp;ldquo;congestion&amp;rdquo; decrease by 19&amp;ndash;51% compared to a model with statically specified traffic signal phase cycles. The characteristics of flow dynamics obtained in the simulation make it possible to construct an overall control quality function and to assess, from the standpoint of traffic network management organization, an acceptable density of traffic signals and unsignalized intersections.</description>
	<pubDate>2025-11-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 122: Percolation&amp;ndash;Stochastic Model for Traffic Management in Transport Networks</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/122">doi: 10.3390/informatics12040122</a></p>
	<p>Authors:
		Anton Aleshkin
		Dmitry Zhukov
		Vadim Zhmud
		</p>
	<p>This article describes a model for optimizing traffic flow control and generating traffic signal phases based on the stochastic dynamics of traffic and the percolation properties of transport networks. As input data (in SUMO), we use lane-level vehicle flow rates, treating them as random processes with unknown distributions. It is shown that the percolation threshold of the transport network can serve as a reliability criterion in a stochastic model of lane blockage and can be used to determine the control interval. To calculate the durations of permissive control signals and their sequence for different directions, vehicle queues are considered and the time required for them to reach the network&rsquo;s percolation threshold is estimated. Subsequently, the lane with the largest queue (i.e., the shortest time to reach blockage) is selected, and a phase is formed for its signal control, as well as for other lanes that can be opened simultaneously. Simulation results show that when dynamic traffic signal control is used and a percolation-dynamic model for balancing road traffic is applied, lane occupancy indicators such as &ldquo;congestion&rdquo; decrease by 19&ndash;51% compared to a model with statically specified traffic signal phase cycles. The characteristics of flow dynamics obtained in the simulation make it possible to construct an overall control quality function and to assess, from the standpoint of traffic network management organization, an acceptable density of traffic signals and unsignalized intersections.</p>
	]]></content:encoded>

	<dc:title>Percolation&amp;ndash;Stochastic Model for Traffic Management in Transport Networks</dc:title>
			<dc:creator>Anton Aleshkin</dc:creator>
			<dc:creator>Dmitry Zhukov</dc:creator>
			<dc:creator>Vadim Zhmud</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040122</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-06</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-06</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>122</prism:startingPage>
		<prism:doi>10.3390/informatics12040122</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/122</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/121">

	<title>Informatics, Vol. 12, Pages 121: Digital Competencies for a FinTech-Driven Accounting Profession: A Systematic Literature Review</title>
	<link>https://www.mdpi.com/2227-9709/12/4/121</link>
	<description>Financial Technology (FinTech) is fundamentally reshaping the accounting profession, accelerating the shift from routine transactional activities to more strategic, data-driven functions. This transformation demands advanced digital competencies, yet the scholarly understanding of these skills remains fragmented. To provide conceptual and analytical clarity, this study defines FinTech as an ecosystem of enabling technologies, including artificial intelligence, data analytics, and blockchain, that collectively drive this professional transition. Addressing the lack of systematic synthesis, the study employs a systematic literature review (SLR) guided by the PRISMA 2020 framework, complemented by bibliometric analysis, to map the intellectual landscape. The review focuses on peer-reviewed journal articles published between January 2020 and June 2025, thereby capturing the accelerated digital transformation of the post-pandemic era. The analysis identifies four dominant thematic clusters: (1) the professional context and digital transformation; (2) the educational response and curriculum development; (3) core competencies and their technological drivers; and (4) ethical judgement and professional responsibilities. Synthesising these themes reveals critical research gaps in faculty readiness, curriculum integration, ethical governance, and the empirical validation of institutional strategies. By offering a structured map of the field, this review contributes actionable insights for educators, professional bodies, and firms, and advances a forward-looking research agenda to align professional readiness with the realities of the FinTech era.</description>
	<pubDate>2025-11-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 121: Digital Competencies for a FinTech-Driven Accounting Profession: A Systematic Literature Review</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/121">doi: 10.3390/informatics12040121</a></p>
	<p>Authors:
		Saiphit Satjawisate
		Kanitsorn Suriyapaiboonwattana
		Alisara Saramolee
		Kate Hone
		</p>
	<p>Financial Technology (FinTech) is fundamentally reshaping the accounting profession, accelerating the shift from routine transactional activities to more strategic, data-driven functions. This transformation demands advanced digital competencies, yet the scholarly understanding of these skills remains fragmented. To provide conceptual and analytical clarity, this study defines FinTech as an ecosystem of enabling technologies, including artificial intelligence, data analytics, and blockchain, that collectively drive this professional transition. Addressing the lack of systematic synthesis, the study employs a systematic literature review (SLR) guided by the PRISMA 2020 framework, complemented by bibliometric analysis, to map the intellectual landscape. The review focuses on peer-reviewed journal articles published between January 2020 and June 2025, thereby capturing the accelerated digital transformation of the post-pandemic era. The analysis identifies four dominant thematic clusters: (1) the professional context and digital transformation; (2) the educational response and curriculum development; (3) core competencies and their technological drivers; and (4) ethical judgement and professional responsibilities. Synthesising these themes reveals critical research gaps in faculty readiness, curriculum integration, ethical governance, and the empirical validation of institutional strategies. By offering a structured map of the field, this review contributes actionable insights for educators, professional bodies, and firms, and advances a forward-looking research agenda to align professional readiness with the realities of the FinTech era.</p>
	]]></content:encoded>

	<dc:title>Digital Competencies for a FinTech-Driven Accounting Profession: A Systematic Literature Review</dc:title>
			<dc:creator>Saiphit Satjawisate</dc:creator>
			<dc:creator>Kanitsorn Suriyapaiboonwattana</dc:creator>
			<dc:creator>Alisara Saramolee</dc:creator>
			<dc:creator>Kate Hone</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040121</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-06</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-06</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>121</prism:startingPage>
		<prism:doi>10.3390/informatics12040121</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/121</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/120">

	<title>Informatics, Vol. 12, Pages 120: Negotiating Human&amp;ndash;AI Complementarity in Geriatric and Palliative Care: A Qualitative Study of Healthcare Practitioners&amp;rsquo; Perspectives in Northeast China</title>
	<link>https://www.mdpi.com/2227-9709/12/4/120</link>
	<description>Artificial intelligence (AI) is becoming increasingly significant in healthcare around the world, especially in China, where rapid population ageing coincides with rising expectations for quality of life and a shrinking care workforce. This study explores Chinese health practitioners&amp;rsquo; perspectives on using AI assistants in integrated geriatric and palliative care. Drawing on Actor&amp;ndash;Network Theory, care is viewed as a network of interconnected human and non-human actors, including practitioners, technologies, patients and policies. Based in Northeast China, a region with structurally marginalised healthcare infrastructure, this article analyses qualitative interviews with 14 practitioners. Our findings reveal three key themes: (1) tensions between AI&amp;rsquo;s rule-based logic and practitioners&amp;rsquo; human-centred approach; (2) ethical discomfort with AI performing intimate or emotionally sensitive care, especially in end-of-life contexts; (3) structural inequalities, with weak policy and infrastructure limiting effective AI integration. The study highlights that AI offers clearer benefits for routine geriatric care, such as monitoring and basic symptom management, but its utility is far more limited in the complex, relational and ethically sensitive domain of palliative care. Proposing a model of human&amp;ndash;AI complementarity, the article argues that technology should support rather than replace the emotional and relational aspects of care and identifies policy considerations for ethically grounded integration in resource-limited contexts.</description>
	<pubDate>2025-11-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 120: Negotiating Human&amp;ndash;AI Complementarity in Geriatric and Palliative Care: A Qualitative Study of Healthcare Practitioners&amp;rsquo; Perspectives in Northeast China</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/120">doi: 10.3390/informatics12040120</a></p>
	<p>Authors:
		Chenyang Guo
		Chao Fang
		Wenbo Zhang
		John Troyer
		</p>
	<p>Artificial intelligence (AI) is becoming increasingly significant in healthcare around the world, especially in China, where rapid population ageing coincides with rising expectations for quality of life and a shrinking care workforce. This study explores Chinese health practitioners&rsquo; perspectives on using AI assistants in integrated geriatric and palliative care. Drawing on Actor&ndash;Network Theory, care is viewed as a network of interconnected human and non-human actors, including practitioners, technologies, patients and policies. Based in Northeast China, a region with structurally marginalised healthcare infrastructure, this article analyses qualitative interviews with 14 practitioners. Our findings reveal three key themes: (1) tensions between AI&rsquo;s rule-based logic and practitioners&rsquo; human-centred approach; (2) ethical discomfort with AI performing intimate or emotionally sensitive care, especially in end-of-life contexts; (3) structural inequalities, with weak policy and infrastructure limiting effective AI integration. The study highlights that AI offers clearer benefits for routine geriatric care, such as monitoring and basic symptom management, but its utility is far more limited in the complex, relational and ethically sensitive domain of palliative care. Proposing a model of human&ndash;AI complementarity, the article argues that technology should support rather than replace the emotional and relational aspects of care and identifies policy considerations for ethically grounded integration in resource-limited contexts.</p>
	]]></content:encoded>

	<dc:title>Negotiating Human&amp;ndash;AI Complementarity in Geriatric and Palliative Care: A Qualitative Study of Healthcare Practitioners&amp;rsquo; Perspectives in Northeast China</dc:title>
			<dc:creator>Chenyang Guo</dc:creator>
			<dc:creator>Chao Fang</dc:creator>
			<dc:creator>Wenbo Zhang</dc:creator>
			<dc:creator>John Troyer</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040120</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-11-01</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-11-01</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>120</prism:startingPage>
		<prism:doi>10.3390/informatics12040120</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/120</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/119">

	<title>Informatics, Vol. 12, Pages 119: Explainable AI for Clinical Decision Support Systems: Literature Review, Key Gaps, and Research Synthesis</title>
	<link>https://www.mdpi.com/2227-9709/12/4/119</link>
	<description>While Artificial Intelligence (AI) promises significant enhancements for Clinical Decision Support Systems (CDSSs), the opacity of many AI models remains a major barrier to clinical adoption, primarily due to interpretability and trust challenges. Explainable AI (XAI) seeks to bridge this gap by making model reasoning understandable to clinicians, but technical XAI solutions have too often failed to address real-world clinician needs, workflow integration, and usability concerns. This study synthesizes persistent challenges in applying XAI to CDSS&amp;mdash;including mismatched explanation methods, suboptimal interface designs, and insufficient evaluation practices&amp;mdash;and proposes a structured, user-centered framework to guide more effective and trustworthy XAI-CDSS development. Drawing on a comprehensive literature review, we detail a three-phase framework encompassing user-centered XAI method selection, interface co-design, and iterative evaluation and refinement. We demonstrate its application through a retrospective case study analysis of a published XAI-CDSS for sepsis care. Our synthesis highlights the importance of aligning XAI with clinical workflows, supporting calibrated trust, and deploying robust evaluation methodologies that capture real-world clinician&amp;ndash;AI interaction patterns, such as negotiation. The case analysis shows how the framework can systematically identify and address user-centric gaps, leading to better workflow integration, tailored explanations, and more usable interfaces. We conclude that achieving trustworthy and clinically useful XAI-CDSS requires a fundamentally user-centered approach; our framework offers actionable guidance for creating explainable, usable, and trusted AI systems in healthcare.</description>
	<pubDate>2025-10-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 119: Explainable AI for Clinical Decision Support Systems: Literature Review, Key Gaps, and Research Synthesis</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/119">doi: 10.3390/informatics12040119</a></p>
	<p>Authors:
		Mozhgan Salimparsa
		Kamran Sedig
		Daniel J. Lizotte
		Sheikh S. Abdullah
		Niaz Chalabianloo
		Flory T. Muanda
		</p>
	<p>While Artificial Intelligence (AI) promises significant enhancements for Clinical Decision Support Systems (CDSSs), the opacity of many AI models remains a major barrier to clinical adoption, primarily due to interpretability and trust challenges. Explainable AI (XAI) seeks to bridge this gap by making model reasoning understandable to clinicians, but technical XAI solutions have too often failed to address real-world clinician needs, workflow integration, and usability concerns. This study synthesizes persistent challenges in applying XAI to CDSS&mdash;including mismatched explanation methods, suboptimal interface designs, and insufficient evaluation practices&mdash;and proposes a structured, user-centered framework to guide more effective and trustworthy XAI-CDSS development. Drawing on a comprehensive literature review, we detail a three-phase framework encompassing user-centered XAI method selection, interface co-design, and iterative evaluation and refinement. We demonstrate its application through a retrospective case study analysis of a published XAI-CDSS for sepsis care. Our synthesis highlights the importance of aligning XAI with clinical workflows, supporting calibrated trust, and deploying robust evaluation methodologies that capture real-world clinician&ndash;AI interaction patterns, such as negotiation. The case analysis shows how the framework can systematically identify and address user-centric gaps, leading to better workflow integration, tailored explanations, and more usable interfaces. We conclude that achieving trustworthy and clinically useful XAI-CDSS requires a fundamentally user-centered approach; our framework offers actionable guidance for creating explainable, usable, and trusted AI systems in healthcare.</p>
	]]></content:encoded>

	<dc:title>Explainable AI for Clinical Decision Support Systems: Literature Review, Key Gaps, and Research Synthesis</dc:title>
			<dc:creator>Mozhgan Salimparsa</dc:creator>
			<dc:creator>Kamran Sedig</dc:creator>
			<dc:creator>Daniel J. Lizotte</dc:creator>
			<dc:creator>Sheikh S. Abdullah</dc:creator>
			<dc:creator>Niaz Chalabianloo</dc:creator>
			<dc:creator>Flory T. Muanda</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040119</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-28</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>119</prism:startingPage>
		<prism:doi>10.3390/informatics12040119</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/119</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/118">

	<title>Informatics, Vol. 12, Pages 118: Sentiment Analysis of Meme Images Using Deep Neural Network Based on Keypoint Representation</title>
	<link>https://www.mdpi.com/2227-9709/12/4/118</link>
	<description>Meme image sentiment analysis is a task of examining public opinion based on meme images posted on social media. In various fields, stakeholders often need to quickly and accurately determine the sentiment of memes from large amounts of available data. Therefore, innovation is needed in image pre-processing so that an increase in performance metrics, especially accuracy, can be obtained in improving the classification of meme image sentiment. This is because sentiment classification using human face datasets yields higher accuracy than using meme images. This research aims to develop a sentiment analysis model for meme images based on key points. The analyzed meme images contain human faces. The facial features extracted using key points are the eyebrows, eyes, and mouth. In the proposed method, key points of facial features are represented in the form of graphs, specifically directed graphs, weighted graphs, or weighted directed graphs. These graph representations of key points are then used to build a sentiment analysis model based on a Deep Neural Network (DNN) with three layers (hidden layer: i = 64, j = 64, k = 90). There are several contributions of this study, namely developing a human facial sentiment detection model using key points, representing key points as various graphs, and constructing a meme dataset with Indonesian text. The proposed model is evaluated using several metrics, namely accuracy, precision, recall, and F-1 score. Furthermore, a comparative analysis is conducted to evaluate the performance of the proposed model against existing approaches. The experimental results show that the proposed model, which utilized the directed graph representation of key points, obtained the highest accuracy at 83% and F1 score at 81%, respectively.</description>
	<pubDate>2025-10-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 118: Sentiment Analysis of Meme Images Using Deep Neural Network Based on Keypoint Representation</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/118">doi: 10.3390/informatics12040118</a></p>
	<p>Authors:
		Endah Asmawati
		Ahmad Saikhu
		Daniel O. Siahaan
		</p>
	<p>Meme image sentiment analysis is a task of examining public opinion based on meme images posted on social media. In various fields, stakeholders often need to quickly and accurately determine the sentiment of memes from large amounts of available data. Therefore, innovation is needed in image pre-processing so that an increase in performance metrics, especially accuracy, can be obtained in improving the classification of meme image sentiment. This is because sentiment classification using human face datasets yields higher accuracy than using meme images. This research aims to develop a sentiment analysis model for meme images based on key points. The analyzed meme images contain human faces. The facial features extracted using key points are the eyebrows, eyes, and mouth. In the proposed method, key points of facial features are represented in the form of graphs, specifically directed graphs, weighted graphs, or weighted directed graphs. These graph representations of key points are then used to build a sentiment analysis model based on a Deep Neural Network (DNN) with three layers (hidden layer: i = 64, j = 64, k = 90). There are several contributions of this study, namely developing a human facial sentiment detection model using key points, representing key points as various graphs, and constructing a meme dataset with Indonesian text. The proposed model is evaluated using several metrics, namely accuracy, precision, recall, and F-1 score. Furthermore, a comparative analysis is conducted to evaluate the performance of the proposed model against existing approaches. The experimental results show that the proposed model, which utilized the directed graph representation of key points, obtained the highest accuracy at 83% and F1 score at 81%, respectively.</p>
	]]></content:encoded>

	<dc:title>Sentiment Analysis of Meme Images Using Deep Neural Network Based on Keypoint Representation</dc:title>
			<dc:creator>Endah Asmawati</dc:creator>
			<dc:creator>Ahmad Saikhu</dc:creator>
			<dc:creator>Daniel O. Siahaan</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040118</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-28</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>118</prism:startingPage>
		<prism:doi>10.3390/informatics12040118</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/118</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/117">

	<title>Informatics, Vol. 12, Pages 117: Hybrid Approach Using Dynamic Mode Decomposition and Wavelet Scattering Transform for EEG-Based Seizure Classification</title>
	<link>https://www.mdpi.com/2227-9709/12/4/117</link>
	<description>Epilepsy is a brain disorder that affects individuals; hence, preemptive diagnosis is required. Accurate classification of seizures is critical to optimize the treatment of epilepsy. Patients with epilepsy are unable to lead normal lives due to the unpredictable nature of seizures. Thus, developing new methods to help these patients can significantly improve their quality of life and result in huge financial savings for the healthcare industry. This paper presents a hybrid method integrating dynamic mode decomposition (DMD) and wavelet scattering transform (WST) for EEG-based seizure analysis. DMD allows for the breakdown of EEG signals into modes that catch the dynamical structures present in the EEG. Then, WST is applied as it is invariant to time-warping and computes robust hierarchical features at different timescales. DMD-WST combination provides an in-depth multi-scale analysis of the temporal structures present within the EEG data. This process improves the representation quality for feature extraction, which can convey dynamic modes and multi-scale frequency information for improved classification performance. The proposed hybrid approach is validated with three datasets, namely the CHB-MIT PhysioNet dataset, the Bern Barcelona dataset, and the Khas dataset, which can accurately distinguish the seizure and non-seizure states. The proposed method performed classification using different machine learning and deep learning methods, including support vector machine, random forest, k-nearest neighbours, booster algorithm, and bagging. These models were compared in terms of accuracy, precision, sensitivity, Cohen&amp;rsquo;s kappa, and Matthew&amp;rsquo;s correlation coefficient. The DMD-WST approach achieved a maximum accuracy of 99% and F1 score of 0.99 on the CHB-MIT dataset, and obtained 100% accuracy and F1 score of 1.00 on both the Bern Barcelona and Khas datasets, outperforming existing methods.</description>
	<pubDate>2025-10-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 117: Hybrid Approach Using Dynamic Mode Decomposition and Wavelet Scattering Transform for EEG-Based Seizure Classification</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/117">doi: 10.3390/informatics12040117</a></p>
	<p>Authors:
		Sreevidya C
		Neethu Mohan
		Sachin Kumar S
		Aravind Harikumar
		</p>
	<p>Epilepsy is a brain disorder that affects individuals; hence, preemptive diagnosis is required. Accurate classification of seizures is critical to optimize the treatment of epilepsy. Patients with epilepsy are unable to lead normal lives due to the unpredictable nature of seizures. Thus, developing new methods to help these patients can significantly improve their quality of life and result in huge financial savings for the healthcare industry. This paper presents a hybrid method integrating dynamic mode decomposition (DMD) and wavelet scattering transform (WST) for EEG-based seizure analysis. DMD allows for the breakdown of EEG signals into modes that catch the dynamical structures present in the EEG. Then, WST is applied as it is invariant to time-warping and computes robust hierarchical features at different timescales. DMD-WST combination provides an in-depth multi-scale analysis of the temporal structures present within the EEG data. This process improves the representation quality for feature extraction, which can convey dynamic modes and multi-scale frequency information for improved classification performance. The proposed hybrid approach is validated with three datasets, namely the CHB-MIT PhysioNet dataset, the Bern Barcelona dataset, and the Khas dataset, which can accurately distinguish the seizure and non-seizure states. The proposed method performed classification using different machine learning and deep learning methods, including support vector machine, random forest, k-nearest neighbours, booster algorithm, and bagging. These models were compared in terms of accuracy, precision, sensitivity, Cohen&rsquo;s kappa, and Matthew&rsquo;s correlation coefficient. The DMD-WST approach achieved a maximum accuracy of 99% and F1 score of 0.99 on the CHB-MIT dataset, and obtained 100% accuracy and F1 score of 1.00 on both the Bern Barcelona and Khas datasets, outperforming existing methods.</p>
	]]></content:encoded>

	<dc:title>Hybrid Approach Using Dynamic Mode Decomposition and Wavelet Scattering Transform for EEG-Based Seizure Classification</dc:title>
			<dc:creator>Sreevidya C</dc:creator>
			<dc:creator>Neethu Mohan</dc:creator>
			<dc:creator>Sachin Kumar S</dc:creator>
			<dc:creator>Aravind Harikumar</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040117</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-28</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-28</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>117</prism:startingPage>
		<prism:doi>10.3390/informatics12040117</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/117</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/116">

	<title>Informatics, Vol. 12, Pages 116: Computational Analysis of Zingiber officinale Identifies GABAergic Signaling as a Potential Therapeutic Mechanism in Colorectal Cancer</title>
	<link>https://www.mdpi.com/2227-9709/12/4/116</link>
	<description>Colorectal cancer cases are on the rise and have become a leading cause of cancer-related deaths. Ginger (Zingiber officinale) is widely used in traditional herbal medicine and has been proposed as a potential treatment for colorectal cancer. This study aimed to explore the network pharmacology and pharmacodynamics of ginger in colorectal cancer treatment. Colorectal cancer patient data from the GEO dataset were analyzed to identify differentially expressed genes (DEGs). Six key components of ginger were selected based on specific criteria, and their target proteins were predicted using the TCMSP database. By overlapping DEGs with predicted targets, 36 candidate drug targets were identified. These targets were analyzed for biological alterations, pathway enrichment, protein&amp;ndash;protein interactions, and hub-gene selection, integrating network pharmacology. Molecular docking simulations were conducted to confirm the binding interactions between ginger components and target proteins. The findings showed that GABAergic signaling and apoptosis were the most enriched pathways, suggesting their potential role in colorectal cancer treatment. Docking simulations further revealed that ginger&amp;rsquo;s active compounds bind to COX2 and ESR1, indicating anti-inflammatory effects and modulation of estrogenic activity. This study provides insight into the systemic mechanisms of ginger in colorectal cancer treatment through an integrated &amp;ldquo;drug&amp;ndash;gene&amp;ndash;pathway&amp;ndash;disease&amp;rdquo; network approach.</description>
	<pubDate>2025-10-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 116: Computational Analysis of Zingiber officinale Identifies GABAergic Signaling as a Potential Therapeutic Mechanism in Colorectal Cancer</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/116">doi: 10.3390/informatics12040116</a></p>
	<p>Authors:
		Suthipong Chujan
		Nutsira Vajeethaveesin
		Jutamaad Satayavivad
		</p>
	<p>Colorectal cancer cases are on the rise and have become a leading cause of cancer-related deaths. Ginger (Zingiber officinale) is widely used in traditional herbal medicine and has been proposed as a potential treatment for colorectal cancer. This study aimed to explore the network pharmacology and pharmacodynamics of ginger in colorectal cancer treatment. Colorectal cancer patient data from the GEO dataset were analyzed to identify differentially expressed genes (DEGs). Six key components of ginger were selected based on specific criteria, and their target proteins were predicted using the TCMSP database. By overlapping DEGs with predicted targets, 36 candidate drug targets were identified. These targets were analyzed for biological alterations, pathway enrichment, protein&ndash;protein interactions, and hub-gene selection, integrating network pharmacology. Molecular docking simulations were conducted to confirm the binding interactions between ginger components and target proteins. The findings showed that GABAergic signaling and apoptosis were the most enriched pathways, suggesting their potential role in colorectal cancer treatment. Docking simulations further revealed that ginger&rsquo;s active compounds bind to COX2 and ESR1, indicating anti-inflammatory effects and modulation of estrogenic activity. This study provides insight into the systemic mechanisms of ginger in colorectal cancer treatment through an integrated &ldquo;drug&ndash;gene&ndash;pathway&ndash;disease&rdquo; network approach.</p>
	]]></content:encoded>

	<dc:title>Computational Analysis of Zingiber officinale Identifies GABAergic Signaling as a Potential Therapeutic Mechanism in Colorectal Cancer</dc:title>
			<dc:creator>Suthipong Chujan</dc:creator>
			<dc:creator>Nutsira Vajeethaveesin</dc:creator>
			<dc:creator>Jutamaad Satayavivad</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040116</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-24</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-24</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>116</prism:startingPage>
		<prism:doi>10.3390/informatics12040116</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/116</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/115">

	<title>Informatics, Vol. 12, Pages 115: Efficient Wearable Sensor-Based Activity Recognition for Human&amp;ndash;Robot Collaboration in Agricultural Environments</title>
	<link>https://www.mdpi.com/2227-9709/12/4/115</link>
	<description>This study focuses on human awareness, a critical component in human&amp;ndash;robot interaction, particularly within agricultural environments where interactions are enriched by complex contextual information. The main objective is identifying human activities occurring during collaborative harvesting tasks involving humans and robots. To achieve this, we propose a novel and lightweight deep learning model, named 1D-ResNeXt, designed explicitly for recognizing activities in agriculture-related human&amp;ndash;robot collaboration. The model is built as an end-to-end architecture incorporating feature fusion and a multi-kernel convolutional block strategy. It utilizes residual connections and a split&amp;ndash;transform&amp;ndash;merge mechanism to mitigate performance degradation and reduce model complexity by limiting the number of trainable parameters. Sensor data were collected from twenty individuals with five wearable devices placed on different body parts. Each sensor was embedded with tri-axial accelerometers, gyroscopes, and magnetometers. Under real field conditions, the participants performed several sub-tasks commonly associated with agricultural labor, such as lifting and carrying loads. Before classification, the raw sensor signals were pre-processed to eliminate noise. The cleaned time-series data were then input into the proposed deep learning network for sequential pattern recognition. Experimental results showed that the chest-mounted sensor achieved the highest F1-score of 99.86%, outperforming other sensor placements and combinations. An analysis of temporal window sizes (0.5, 1.0, 1.5, and 2.0 s) demonstrated that the 0.5 s window provided the best recognition performance, indicating that key activity features in agriculture can be captured over short intervals. 
Moreover, a comprehensive evaluation of sensor modalities revealed that multimodal fusion of accelerometer, gyroscope, and magnetometer data yielded the best accuracy at 99.92%. The combination of accelerometer and gyroscope data offered an optimal compromise, achieving 99.49% accuracy while maintaining lower system complexity. These findings highlight the importance of strategic sensor placement and data fusion in enhancing activity recognition performance while reducing the need for extensive data and computational resources. This work contributes to developing intelligent, efficient, and adaptive collaborative systems, offering promising applications in agriculture and beyond, with improved safety, cost-efficiency, and real-time operational capability.</description>
	<pubDate>2025-10-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 115: Efficient Wearable Sensor-Based Activity Recognition for Human&ndash;Robot Collaboration in Agricultural Environments</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/115">doi: 10.3390/informatics12040115</a></p>
	<p>Authors:
		Sakorn Mekruksavanich
		Anuchit Jitpattanakul
		</p>
	<p>This study focuses on human awareness, a critical component in human&ndash;robot interaction, particularly within agricultural environments where interactions are enriched by complex contextual information. The main objective is identifying human activities occurring during collaborative harvesting tasks involving humans and robots. To achieve this, we propose a novel and lightweight deep learning model, named 1D-ResNeXt, designed explicitly for recognizing activities in agriculture-related human&ndash;robot collaboration. The model is built as an end-to-end architecture incorporating feature fusion and a multi-kernel convolutional block strategy. It utilizes residual connections and a split&ndash;transform&ndash;merge mechanism to mitigate performance degradation and reduce model complexity by limiting the number of trainable parameters. Sensor data were collected from twenty individuals with five wearable devices placed on different body parts. Each sensor was embedded with tri-axial accelerometers, gyroscopes, and magnetometers. Under real field conditions, the participants performed several sub-tasks commonly associated with agricultural labor, such as lifting and carrying loads. Before classification, the raw sensor signals were pre-processed to eliminate noise. The cleaned time-series data were then input into the proposed deep learning network for sequential pattern recognition. Experimental results showed that the chest-mounted sensor achieved the highest F1-score of 99.86%, outperforming other sensor placements and combinations. An analysis of temporal window sizes (0.5, 1.0, 1.5, and 2.0 s) demonstrated that the 0.5 s window provided the best recognition performance, indicating that key activity features in agriculture can be captured over short intervals. 
Moreover, a comprehensive evaluation of sensor modalities revealed that multimodal fusion of accelerometer, gyroscope, and magnetometer data yielded the best accuracy at 99.92%. The combination of accelerometer and gyroscope data offered an optimal compromise, achieving 99.49% accuracy while maintaining lower system complexity. These findings highlight the importance of strategic sensor placement and data fusion in enhancing activity recognition performance while reducing the need for extensive data and computational resources. This work contributes to developing intelligent, efficient, and adaptive collaborative systems, offering promising applications in agriculture and beyond, with improved safety, cost-efficiency, and real-time operational capability.</p>
	]]></content:encoded>

	<dc:title>Efficient Wearable Sensor-Based Activity Recognition for Human&amp;ndash;Robot Collaboration in Agricultural Environments</dc:title>
			<dc:creator>Sakorn Mekruksavanich</dc:creator>
			<dc:creator>Anuchit Jitpattanakul</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040115</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-23</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-23</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>115</prism:startingPage>
		<prism:doi>10.3390/informatics12040115</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/115</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2227-9709/12/4/114">

	<title>Informatics, Vol. 12, Pages 114: Leveraging Transformer with Self-Attention for Multi-Label Emotion Classification in Crisis Tweets</title>
	<link>https://www.mdpi.com/2227-9709/12/4/114</link>
	<description>Social media platforms have become a widely used medium for individuals to express complex and multifaceted emotions. Traditional single-label emotion classification methods fall short in accurately capturing the simultaneous presence of multiple emotions within these texts. To address this limitation, we propose a classification model that enhances the pre-trained Cardiff NLP transformer by integrating additional self-attention layers. Experimental results show our approach achieves a micro-F1 score of 0.7208, a macro-F1 score of 0.6192, and an average Jaccard index of 0.6066, which is an overall improvement of approximately 3.00% compared to the baseline. We apply this model to a real-world dataset of tweets related to the 2011 Christchurch earthquakes as a case study to demonstrate its ability to capture multi-category emotional expressions and detect co-occurring emotions that single-label approaches would miss. Our analysis revealed distinct emotional patterns aligned with key seismic events, including overlapping positive and negative emotions, and temporal dynamics of emotional response. This work contributes a robust method for fine-grained emotion analysis which can aid disaster response, mental health monitoring and social research.</description>
	<pubDate>2025-10-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Informatics, Vol. 12, Pages 114: Leveraging Transformer with Self-Attention for Multi-Label Emotion Classification in Crisis Tweets</b></p>
	<p>Informatics <a href="https://www.mdpi.com/2227-9709/12/4/114">doi: 10.3390/informatics12040114</a></p>
	<p>Authors:
		Patricia Anthony
		Jing Zhou
		</p>
	<p>Social media platforms have become a widely used medium for individuals to express complex and multifaceted emotions. Traditional single-label emotion classification methods fall short in accurately capturing the simultaneous presence of multiple emotions within these texts. To address this limitation, we propose a classification model that enhances the pre-trained Cardiff NLP transformer by integrating additional self-attention layers. Experimental results show our approach achieves a micro-F1 score of 0.7208, a macro-F1 score of 0.6192, and an average Jaccard index of 0.6066, which is an overall improvement of approximately 3.00% compared to the baseline. We apply this model to a real-world dataset of tweets related to the 2011 Christchurch earthquakes as a case study to demonstrate its ability to capture multi-category emotional expressions and detect co-occurring emotions that single-label approaches would miss. Our analysis revealed distinct emotional patterns aligned with key seismic events, including overlapping positive and negative emotions, and temporal dynamics of emotional response. This work contributes a robust method for fine-grained emotion analysis which can aid disaster response, mental health monitoring and social research.</p>
	]]></content:encoded>

	<dc:title>Leveraging Transformer with Self-Attention for Multi-Label Emotion Classification in Crisis Tweets</dc:title>
			<dc:creator>Patricia Anthony</dc:creator>
			<dc:creator>Jing Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/informatics12040114</dc:identifier>
	<dc:source>Informatics</dc:source>
	<dc:date>2025-10-22</dc:date>

	<prism:publicationName>Informatics</prism:publicationName>
	<prism:publicationDate>2025-10-22</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>114</prism:startingPage>
		<prism:doi>10.3390/informatics12040114</prism:doi>
	<prism:url>https://www.mdpi.com/2227-9709/12/4/114</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
