<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/jemr">
		<title>Journal of Eye Movement Research</title>
		<description>Latest open access articles published in J. Eye Mov. Res. at https://www.mdpi.com/journal/jemr</description>
		<link>https://www.mdpi.com/journal/jemr</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/jemr"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
		<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021?1778841406"/>
		<items>
			<rdf:Seq>
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/55" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/54" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/53" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/52" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/51" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/50" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/49" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/48" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/47" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/46" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/45" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/44" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/43" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/3/42" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/41" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/40" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/39" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/38" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/37" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/36" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/35" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/34" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/33" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/32" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/31" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/30" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/29" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/28" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/27" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/26" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/25" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/2/24" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/23" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/22" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/21" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/20" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/19" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/18" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/17" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/16" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/15" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/14" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/13" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/12" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/11" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/10" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/9" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/8" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/7" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/6" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/5" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/4" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/3" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/2" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/19/1/1" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/77" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/76" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/75" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/74" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/73" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/72" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/71" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/70" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/69" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/68" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/67" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/66" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/65" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/64" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/63" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/62" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/61" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/6/60" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/59" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/58" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/57" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/56" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/55" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/54" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/53" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/52" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/51" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/50" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/49" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/48" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/47" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/46" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/45" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/44" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/43" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/42" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/41" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/40" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/39" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/38" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/5/37" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/4/36" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/4/35" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/4/34" />
				<rdf:li rdf:resource="https://www.mdpi.com/1995-8692/18/4/33" />
			</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/55">

	<title>JEMR, Vol. 19, Pages 55: Eye-Tracking Evidence That Verifiable Explanations Support Visual Evidence Checking in AI-Assisted Chest Radiograph Interpretation</title>
	<link>https://www.mdpi.com/1995-8692/19/3/55</link>
	<description>Evaluations of medical artificial intelligence (AI) explanations often rely on self-reported trust, perceived usefulness, acceptance, or final decision outcomes, while less directly characterizing whether users check evidence around AI outputs during decision making. In AI-assisted chest radiograph interpretation, a critical process-level question is whether users return from the AI output to the original image evidence when further scrutiny is needed. To address this question, we examined whether verifiable explanations&amp;mdash;explanations designed to make AI recommendations checkable against the original image evidence&amp;mdash;are associated with process markers of visual evidence checking in AI-assisted chest radiograph interpretation using eye-tracking and human-factors process measures. A 2 &amp;times; 2 between-subjects experiment manipulated verifiable explanations (present vs. absent) and risk context (high vs. low), with AI recommendation correctness embedded at the trial level. Fifty-six clinically trained participants each completed 24 interpretation trials. Analyses focused primarily on gaze transitions between the AI output and the original image and dwell time on the original image, with response time and exploratory verification-related behavioral states used as auxiliary process measures. Verifiable explanations did not simply increase acceptance of AI recommendations. Instead, when AI recommendations were incorrect, they were most clearly associated with more frequent AI&amp;ndash;image transitions and longer absolute dwell time on the original image evidence. Exploratory state-based analyses further suggested a lower tendency toward no-verify adopt under incorrect AI recommendations, but these findings were treated as complementary rather than primary evidence. Overall, the value of verifiable explanations lies not only in final decisions but in whether they make AI recommendations more inspectable against the original evidence. These findings provide eye-tracking evidence consistent with visual evidence checking in AI-assisted diagnostic interfaces and underscore the value of process-sensitive human-factors measures in medical AI evaluation.</description>
	<pubDate>2026-05-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 55: Eye-Tracking Evidence That Verifiable Explanations Support Visual Evidence Checking in AI-Assisted Chest Radiograph Interpretation</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/55">doi: 10.3390/jemr19030055</a></p>
	<p>Authors:
		Yong Han
		Wumin Ouyang
		Hemin Du
		Mengyun Ma
		Guanning Wang
		</p>
	<p>Evaluations of medical artificial intelligence (AI) explanations often rely on self-reported trust, perceived usefulness, acceptance, or final decision outcomes, while less directly characterizing whether users check evidence around AI outputs during decision making. In AI-assisted chest radiograph interpretation, a critical process-level question is whether users return from the AI output to the original image evidence when further scrutiny is needed. To address this question, we examined whether verifiable explanations&mdash;explanations designed to make AI recommendations checkable against the original image evidence&mdash;are associated with process markers of visual evidence checking in AI-assisted chest radiograph interpretation using eye-tracking and human-factors process measures. A 2 &times; 2 between-subjects experiment manipulated verifiable explanations (present vs. absent) and risk context (high vs. low), with AI recommendation correctness embedded at the trial level. Fifty-six clinically trained participants each completed 24 interpretation trials. Analyses focused primarily on gaze transitions between the AI output and the original image and dwell time on the original image, with response time and exploratory verification-related behavioral states used as auxiliary process measures. Verifiable explanations did not simply increase acceptance of AI recommendations. Instead, when AI recommendations were incorrect, they were most clearly associated with more frequent AI&ndash;image transitions and longer absolute dwell time on the original image evidence. Exploratory state-based analyses further suggested a lower tendency toward no-verify adopt under incorrect AI recommendations, but these findings were treated as complementary rather than primary evidence. Overall, the value of verifiable explanations lies not only in final decisions but in whether they make AI recommendations more inspectable against the original evidence. These findings provide eye-tracking evidence consistent with visual evidence checking in AI-assisted diagnostic interfaces and underscore the value of process-sensitive human-factors measures in medical AI evaluation.</p>
	]]></content:encoded>

	<dc:title>Eye-Tracking Evidence That Verifiable Explanations Support Visual Evidence Checking in AI-Assisted Chest Radiograph Interpretation</dc:title>
			<dc:creator>Yong Han</dc:creator>
			<dc:creator>Wumin Ouyang</dc:creator>
			<dc:creator>Hemin Du</dc:creator>
			<dc:creator>Mengyun Ma</dc:creator>
			<dc:creator>Guanning Wang</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030055</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-15</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-15</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/jemr19030055</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/55</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/54">

	<title>JEMR, Vol. 19, Pages 54: Cognitive Mechanisms of Predictive Processing in Chinese Reading: An Eye-Movement Analysis Based on the Ex-Gaussian Distribution</title>
	<link>https://www.mdpi.com/1995-8692/19/3/54</link>
	<description>This study employed the Ex-Gaussian distribution model to analyse eye-tracking data, to elucidate the cognitive mechanisms underlying predictive processing during Chinese reading. Using a single-factor, two-level within-subjects design (contextual predictability: high vs. low), data from 32 adult readers were analysed across the pre-target and target word regions. The results revealed that predictive reading follows a three-stage cognitive model. In the expectation generation stage (pre-target region), a significant negative &amp;tau; effect indicated resource pre-allocation driven by strong contextual constraints, thereby facilitating the construction of predictive lexical representations. In the verification and integration stage (target word region), a significant negative &amp;mu; effect in the later measurement window indicated that successful prediction&amp;ndash;input matching accelerated lexical identification and enhanced integration efficiency; the &amp;sigma; parameter did not reach significance in either measurement window. In the conflict resolution stage (pre-target and target word regions), a significant positive &amp;tau; effect indicated that verification failure triggered lexical activation competition at the target word, driving regressive fixations to the pre-target region for contextual reanalysis; conflict resolution costs were markedly higher under the low-predictability condition, owing to the absence of a dominant activation anchor. These findings suggest that contextual predictability influences reading through a dual mechanism: the &amp;mu; parameter modulates the automatic processing speed of lexical identification, whereas the &amp;tau; parameter regulates the cognitive control processes underlying expectation generation and conflict resolution. Together, these results provide empirical support for the integration of predictive coding theory and cognitive control frameworks.</description>
	<pubDate>2026-05-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 54: Cognitive Mechanisms of Predictive Processing in Chinese Reading: An Eye-Movement Analysis Based on the Ex-Gaussian Distribution</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/54">doi: 10.3390/jemr19030054</a></p>
	<p>Authors:
		Wen Tong
		Xiaojiao Li
		Yingdi Liu
		Zhifang Liu
		</p>
	<p>This study employed the Ex-Gaussian distribution model to analyse eye-tracking data, to elucidate the cognitive mechanisms underlying predictive processing during Chinese reading. Using a single-factor, two-level within-subjects design (contextual predictability: high vs. low), data from 32 adult readers were analysed across the pre-target and target word regions. The results revealed that predictive reading follows a three-stage cognitive model. In the expectation generation stage (pre-target region), a significant negative &tau; effect indicated resource pre-allocation driven by strong contextual constraints, thereby facilitating the construction of predictive lexical representations. In the verification and integration stage (target word region), a significant negative &mu; effect in the later measurement window indicated that successful prediction&ndash;input matching accelerated lexical identification and enhanced integration efficiency; the &sigma; parameter did not reach significance in either measurement window. In the conflict resolution stage (pre-target and target word regions), a significant positive &tau; effect indicated that verification failure triggered lexical activation competition at the target word, driving regressive fixations to the pre-target region for contextual reanalysis; conflict resolution costs were markedly higher under the low-predictability condition, owing to the absence of a dominant activation anchor. These findings suggest that contextual predictability influences reading through a dual mechanism: the &mu; parameter modulates the automatic processing speed of lexical identification, whereas the &tau; parameter regulates the cognitive control processes underlying expectation generation and conflict resolution. Together, these results provide empirical support for the integration of predictive coding theory and cognitive control frameworks.</p>
	]]></content:encoded>

	<dc:title>Cognitive Mechanisms of Predictive Processing in Chinese Reading: An Eye-Movement Analysis Based on the Ex-Gaussian Distribution</dc:title>
			<dc:creator>Wen Tong</dc:creator>
			<dc:creator>Xiaojiao Li</dc:creator>
			<dc:creator>Yingdi Liu</dc:creator>
			<dc:creator>Zhifang Liu</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030054</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-15</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-15</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/jemr19030054</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/54</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/53">

	<title>JEMR, Vol. 19, Pages 53: Diagnostic Criteria for Convergence Excess: Diagnostic Validity of Clinical Signs Associated with Near Esophoria</title>
	<link>https://www.mdpi.com/1995-8692/19/3/53</link>
	<description>To propose which tests may be used for diagnosing convergence excess. A prospective study of a consecutive clinical sample was performed. Patients (18&amp;ndash;35 years) attending optometric care underwent subjective refraction, cover test, and Symptom Questionnaire for Visual Dysfunctions (SQVD). Based on cover test and SQVD scores, two groups were recruited: 64 symptomatic subjects with near esophoria and 64 asymptomatic with normal binocular vision. Accommodative and binocular tests were assessed, identifying those with significant statistical differences between groups. Diagnostic validity was analysed using ROC curves, sensitivity, specificity, and likelihood ratios. A serial testing strategy combining tests was also evaluated. ROC analysis showed best diagnostic accuracy for binocular accommodative facility (BAF) failing with &amp;minus;2.00 D (area under the curve, AUC = 0.865) and vergence facility (VF) failing with base-in prisms (AUC = 0.864). Using cutoffs from ROC analysis (BAF: &amp;le;8.25 cpm and VF &amp;le; 12.75 cpm), their combination showed best validity (S = 0.625, Sp = 0.938, LR+ = 10, LR&amp;minus; = 0.4). The combined AUC was 0.932. The proposal for diagnosing convergence excess is to use, in addition to near esophoria with normal distance heterophoria, the combination of failing BAF with negative lenses and failing vergence facility with base-in prisms.</description>
	<pubDate>2026-05-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 53: Diagnostic Criteria for Convergence Excess: Diagnostic Validity of Clinical Signs Associated with Near Esophoria</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/53">doi: 10.3390/jemr19030053</a></p>
	<p>Authors:
		Pilar Cacho-Martínez
		Mario Cantó-Cerdán
		Zaíra Cervera-Sánchez
		Ángel García-Muñoz
		</p>
	<p>To propose which tests may be used for diagnosing convergence excess. A prospective study of a consecutive clinical sample was performed. Patients (18&ndash;35 years) attending optometric care underwent subjective refraction, cover test, and Symptom Questionnaire for Visual Dysfunctions (SQVD). Based on cover test and SQVD scores, two groups were recruited: 64 symptomatic subjects with near esophoria and 64 asymptomatic with normal binocular vision. Accommodative and binocular tests were assessed, identifying those with significant statistical differences between groups. Diagnostic validity was analysed using ROC curves, sensitivity, specificity, and likelihood ratios. A serial testing strategy combining tests was also evaluated. ROC analysis showed best diagnostic accuracy for binocular accommodative facility (BAF) failing with &minus;2.00 D (area under the curve, AUC = 0.865) and vergence facility (VF) failing with base-in prisms (AUC = 0.864). Using cutoffs from ROC analysis (BAF: &le;8.25 cpm and VF &le; 12.75 cpm), their combination showed best validity (S = 0.625, Sp = 0.938, LR+ = 10, LR&minus; = 0.4). The combined AUC was 0.932. The proposal for diagnosing convergence excess is to use, in addition to near esophoria with normal distance heterophoria, the combination of failing BAF with negative lenses and failing vergence facility with base-in prisms.</p>
	]]></content:encoded>

	<dc:title>Diagnostic Criteria for Convergence Excess: Diagnostic Validity of Clinical Signs Associated with Near Esophoria</dc:title>
			<dc:creator>Pilar Cacho-Martínez</dc:creator>
			<dc:creator>Mario Cantó-Cerdán</dc:creator>
			<dc:creator>Zaíra Cervera-Sánchez</dc:creator>
			<dc:creator>Ángel García-Muñoz</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030053</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-14</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-14</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/jemr19030053</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/53</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/52">

	<title>JEMR, Vol. 19, Pages 52: Effects of Word Frequency, Word Length, and Visual Complexity on Chinese Sentence Oral Reading: An Eye Movement Comparison Study Between Children and Adults</title>
	<link>https://www.mdpi.com/1995-8692/19/3/52</link>
	<description>This study investigated how word frequency, word length and visual complexity affect lexical processing during Chinese sentence oral reading, and whether these effects differ between developing and skilled readers. Third-grade children and adults read sentences aloud while their eye movements were recorded with an EyeLink 1000 Plus eye-tracker. Linear mixed-effects models revealed three main findings. First, children showed larger word-frequency and visual-complexity effects than adults, indicating less efficient lexical processing in developing readers. Second, word length moderated the effects of word frequency and visual complexity. Frequency effects were amplified for two-character words, whereas visual-complexity effects were stronger for single-character words on early measures and followed a different pattern on some late measures. Third, at the sentence level, children exhibited shorter forward saccades, more regressions and longer total reading times than adults. These findings provide developmental evidence for the visual and linguistic constraints hypothesis and show how visual recognition and overt phonological output jointly shape foveal lexical processing in Chinese oral reading.</description>
	<pubDate>2026-05-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 52: Effects of Word Frequency, Word Length, and Visual Complexity on Chinese Sentence Oral Reading: An Eye Movement Comparison Study Between Children and Adults</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/52">doi: 10.3390/jemr19030052</a></p>
	<p>Authors:
		Kunyu Lian
		Junhui Pei
		Feifei Liang
		Jie Ma
		Rong Lian
		Xuejun Bai
		</p>
	<p>This study investigated how word frequency, word length and visual complexity affect lexical processing during Chinese sentence oral reading, and whether these effects differ between developing and skilled readers. Third-grade children and adults read sentences aloud while their eye movements were recorded with an EyeLink 1000 Plus eye-tracker. Linear mixed-effects models revealed three main findings. First, children showed larger word-frequency and visual-complexity effects than adults, indicating less efficient lexical processing in developing readers. Second, word length moderated the effects of word frequency and visual complexity. Frequency effects were amplified for two-character words, whereas visual-complexity effects were stronger for single-character words on early measures and followed a different pattern on some late measures. Third, at the sentence level, children exhibited shorter forward saccades, more regressions and longer total reading times than adults. These findings provide developmental evidence for the visual and linguistic constraints hypothesis and show how visual recognition and overt phonological output jointly shape foveal lexical processing in Chinese oral reading.</p>
	]]></content:encoded>

	<dc:title>Effects of Word Frequency, Word Length, and Visual Complexity on Chinese Sentence Oral Reading: An Eye Movement Comparison Study Between Children and Adults</dc:title>
			<dc:creator>Kunyu Lian</dc:creator>
			<dc:creator>Junhui Pei</dc:creator>
			<dc:creator>Feifei Liang</dc:creator>
			<dc:creator>Jie Ma</dc:creator>
			<dc:creator>Rong Lian</dc:creator>
			<dc:creator>Xuejun Bai</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030052</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-13</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-13</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/jemr19030052</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/51">

	<title>JEMR, Vol. 19, Pages 51: Eye Movement Patterns as Robust Biomarkers for Schizophrenia Identification Using a Novel Data Transformation Approach</title>
	<link>https://www.mdpi.com/1995-8692/19/3/51</link>
	<description>Although eye movement abnormalities are documented in schizophrenia (SZ), their translation into objective diagnostic biomarkers remains limited. In this study, we propose a novel identification framework that integrates a Sparsity-Scoring Kernel Entropy Component Analysis (SSKECA) algorithm with a multidimensional eye movement feature set. A total of 40 patients with SZ and 50 healthy controls (HC) completed a free-viewing task involving 100 distinct semantic images. The proposed SSKECA algorithm optimizes multidimensional feature representations to capture latent eye movement patterns characteristic of SZ. The SSKECA&amp;ndash;AdaBoost model achieved competitive performance, with an accuracy of 0.933 and an area under the receiver operating characteristic curve (AUC) of 0.960. Notably, when restricted to only 25 highly discriminative images, the SSKECA&amp;ndash;XGBoost model achieved an accuracy of 0.922. Feature ablation analyses not only reproduced previously reported eye movement findings but also highlighted additional atypical patterns. Misclassification analyses revealed more pronounced eye movement deficits in incorrectly classified SZ patients. Overall, the proposed framework translates complex eye movement patterns into robust indicators for subject-level identification, offering a practical and efficient tool to support objective assessment in SZ.</description>
	<pubDate>2026-05-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 51: Eye Movement Patterns as Robust Biomarkers for Schizophrenia Identification Using a Novel Data Transformation Approach</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/51">doi: 10.3390/jemr19030051</a></p>
	<p>Authors:
		Lijin Huang
		Senhao Li
		Zhi Liu
		Dan Zhang
		Lihua Xu
		Tianhong Zhang
		Jijun Wang
		</p>
	<p>Although eye movement abnormalities are documented in schizophrenia (SZ), their translation into objective diagnostic biomarkers remains limited. In this study, we propose a novel identification framework that integrates a Sparsity-Scoring Kernel Entropy Component Analysis (SSKECA) algorithm with a multidimensional eye movement feature set. A total of 40 patients with SZ and 50 healthy controls (HC) completed a free-viewing task involving 100 distinct semantic images. The proposed SSKECA algorithm optimizes multidimensional feature representations to capture latent eye movement patterns characteristic of SZ. The SSKECA&ndash;AdaBoost model achieved competitive performance, with an accuracy of 0.933 and an area under the receiver operating characteristic curve (AUC) of 0.960. Notably, when restricted to only 25 highly discriminative images, the SSKECA&ndash;XGBoost model achieved an accuracy of 0.922. Feature ablation analyses not only reproduced previously reported eye movement findings but also highlighted additional atypical patterns. Misclassification analyses revealed more pronounced eye movement deficits in incorrectly classified SZ patients. Overall, the proposed framework translates complex eye movement patterns into robust indicators for subject-level identification, offering a practical and efficient tool to support objective assessment in SZ.</p>
	]]></content:encoded>

	<dc:title>Eye Movement Patterns as Robust Biomarkers for Schizophrenia Identification Using a Novel Data Transformation Approach</dc:title>
			<dc:creator>Lijin Huang</dc:creator>
			<dc:creator>Senhao Li</dc:creator>
			<dc:creator>Zhi Liu</dc:creator>
			<dc:creator>Dan Zhang</dc:creator>
			<dc:creator>Lihua Xu</dc:creator>
			<dc:creator>Tianhong Zhang</dc:creator>
			<dc:creator>Jijun Wang</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030051</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-11</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-11</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/jemr19030051</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/50">

	<title>JEMR, Vol. 19, Pages 50: Quantification of Cognitive States via Eye Tracking and Using Artificial Intelligence to Analyze Virtual Reality Learning Experiences</title>
	<link>https://www.mdpi.com/1995-8692/19/3/50</link>
	<description>Virtual reality (VR) technology provides a high sense of immersion and presence to users and can enhance the engagement and performance of learning. However, the VR learning environment introduces more complex audio&amp;ndash;visual stimuli than the traditional multimedia learning environment. These excessive stimuli cause negative effects such as distraction and cognitive overload. To minimize these negative impacts and improve the learning environment, we must evaluate learners&amp;rsquo; cognitive states under the VR environment. Cognitive states can be evaluated subjectively (e.g., through questionnaires) or objectively (e.g., using biometric signals). Subjective and objective methods must be used simultaneously, and correlations between them must be analyzed for quantifying objective measures. The accurate detection of cognitive states is challenging for traditional statistical analysis methods, necessitating the exploration of artificial intelligence (AI) techniques that can classify cognitive states. This study develops a VR learning experience evaluation system based on eye-tracking data. Cognitive states during VR learning are classified as cognitive overload, immersion, and distraction. Correlations between each cognitive state and eye-tracking metrics are evaluated, and the possibility of cognitive-state quantification is discussed. An LSTM-based model developed in this study classified cognitive states from eye-tracking data with moderate accuracy (75.60%) under a subject-independent validation setting.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 50: Quantification of Cognitive States via Eye Tracking and Using Artificial Intelligence to Analyze Virtual Reality Learning Experiences</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/50">doi: 10.3390/jemr19030050</a></p>
	<p>Authors:
		Haram Choi
		Sanghun Nam
		</p>
	<p>Virtual reality (VR) technology provides a high sense of immersion and presence to users and can enhance the engagement and performance of learning. However, the VR learning environment introduces more complex audio&ndash;visual stimuli than the traditional multimedia learning environment. These excessive stimuli cause negative effects such as distraction and cognitive overload. To minimize these negative impacts and improve the learning environment, we must evaluate learners&rsquo; cognitive states under the VR environment. Cognitive states can be evaluated subjectively (e.g., through questionnaires) or objectively (e.g., using biometric signals). Subjective and objective methods must be used simultaneously, and correlations between them must be analyzed for quantifying objective measures. The accurate detection of cognitive states is challenging for traditional statistical analysis methods, necessitating the exploration of artificial intelligence (AI) techniques that can classify cognitive states. This study develops a VR learning experience evaluation system based on eye-tracking data. Cognitive states during VR learning are classified as cognitive overload, immersion, and distraction. Correlations between each cognitive state and eye-tracking metrics are evaluated, and the possibility of cognitive-state quantification is discussed. An LSTM-based model developed in this study classified cognitive states from eye-tracking data with moderate accuracy (75.60%) under a subject-independent validation setting.</p>
	]]></content:encoded>

	<dc:title>Quantification of Cognitive States via Eye Tracking and Using Artificial Intelligence to Analyze Virtual Reality Learning Experiences</dc:title>
			<dc:creator>Haram Choi</dc:creator>
			<dc:creator>Sanghun Nam</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030050</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/jemr19030050</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/49">

	<title>JEMR, Vol. 19, Pages 49: Oculomotor Vergence Eye Movement Endurance in Normal Vision via Virtual Reality-Integrated Eye Tracking</title>
	<link>https://www.mdpi.com/1995-8692/19/3/49</link>
	<description>Modern societies are becoming increasingly dependent on electronics, leading to an increase in visual symptoms. Vergence endurance, the ability to sustain performance, may serve as a quantitative metric to complement symptom surveys to assess vergence performance during near visual tasks. To quantify vergence endurance, 48 participants, aged 15 to 23 years with normal binocular vision, completed a 15 min symmetrical disparity vergence step task to assess potential changes in peak vergence speed over the course of the experiment. Peak velocity, final amplitude, and the slope of the linear regression fit of the peak velocity as a function of stimulus recording were quantified for convergence and divergence responses using an eye tracker integrated in a virtual reality headset. Peak velocity was sustained by 63% and 69% of participants for convergence and divergence eye movements, respectively. Convergence and divergence responses were significantly different for peak velocity (p &amp;lt; 0.001) and vergence endurance (p &amp;lt; 0.03). The endurance metric tool has potential that may help shape future clinical applications for those with acquired brain injuries, including concussions or neurodegenerative diseases such as multiple sclerosis or Parkinson&amp;rsquo;s disease.</description>
	<pubDate>2026-05-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 49: Oculomotor Vergence Eye Movement Endurance in Normal Vision via Virtual Reality-Integrated Eye Tracking</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/49">doi: 10.3390/jemr19030049</a></p>
	<p>Authors:
		Fatema F. Hirani
		Farzin Hajebrahimi
		Tara L. Alvarez
		</p>
	<p>Modern societies are becoming increasingly dependent on electronics, leading to an increase in visual symptoms. Vergence endurance, the ability to sustain performance, may serve as a quantitative metric to complement symptom surveys to assess vergence performance during near visual tasks. To quantify vergence endurance, 48 participants, aged 15 to 23 years with normal binocular vision, completed a 15 min symmetrical disparity vergence step task to assess potential changes in peak vergence speed over the course of the experiment. Peak velocity, final amplitude, and the slope of the linear regression fit of the peak velocity as a function of stimulus recording were quantified for convergence and divergence responses using an eye tracker integrated in a virtual reality headset. Peak velocity was sustained by 63% and 69% of participants for convergence and divergence eye movements, respectively. Convergence and divergence responses were significantly different for peak velocity (p &lt; 0.001) and vergence endurance (p &lt; 0.03). The endurance metric tool has potential that may help shape future clinical applications for those with acquired brain injuries, including concussions or neurodegenerative diseases such as multiple sclerosis or Parkinson&rsquo;s disease.</p>
	]]></content:encoded>

	<dc:title>Oculomotor Vergence Eye Movement Endurance in Normal Vision via Virtual Reality-Integrated Eye Tracking</dc:title>
			<dc:creator>Fatema F. Hirani</dc:creator>
			<dc:creator>Farzin Hajebrahimi</dc:creator>
			<dc:creator>Tara L. Alvarez</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030049</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/jemr19030049</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/48">

	<title>JEMR, Vol. 19, Pages 48: Effects of Driving Task Demands and Information Load on AR-HUD Cognitive Efficiency: The Moderating Role of Working Memory Capacity in a VR-Based Simulated Driving Environment</title>
	<link>https://www.mdpi.com/1995-8692/19/3/48</link>
	<description>The driving scenario and information load jointly influence the cognitive efficiency of augmented reality head-up display (AR-HUD) interfaces. However, the moderating role of drivers’ working memory capacity (WMC) remains unclear. To investigate this mechanism, a simulated driving experiment with a mixed design was conducted in a low-immersivity desktop virtual reality (VR) environment. First, 40 volunteers were screened using an automated operation span task, yielding 16 high- and low-WMC participants. They then drove under three scenarios (urban intersection, expressway, construction zone) and six levels of AR-HUD visual information load. Generalized linear models were applied to the reaction time, fixation duration, and pupil diameter. The results revealed a significant three-way interaction among WMC, scenario, and information load. High-WMC drivers maintained faster responses and lower subjective loads up to Levels 4–6, adopting a deep processing strategy; low-WMC drivers already showed cognitive overload at Level 4 and above, requiring an optimal load range of Level 2–3. The construction zone induced the steepest increase in cognitive load, whereas the expressway markedly reduced sensitivity to additional visual information. Therefore, the optimal AR-HUD information load must be adapted to drivers’ WMC: high-WMC drivers can safely handle Levels 4–6 in low- or medium-demand scenarios, whereas low-WMC drivers require a minimalist presentation of Levels 2–3 in high-demand situations. This study provides quantitative, empirically grounded guidelines for designing cognitively adaptive AR-HUD interfaces.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 48: Effects of Driving Task Demands and Information Load on AR-HUD Cognitive Efficiency: The Moderating Role of Working Memory Capacity in a VR-Based Simulated Driving Environment</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/48">doi: 10.3390/jemr19030048</a></p>
	<p>Authors:
		Jing Li
		Min Lin
		Xinyu Feng
		Hua Zhang
		Chuchu Wang
		Yulian Ma
		</p>
	<p>The driving scenario and information load jointly influence the cognitive efficiency of augmented reality head-up display (AR-HUD) interfaces. However, the moderating role of drivers’ working memory capacity (WMC) remains unclear. To investigate this mechanism, a simulated driving experiment with a mixed design was conducted in a low-immersivity desktop virtual reality (VR) environment. First, 40 volunteers were screened using an automated operation span task, yielding 16 high- and low-WMC participants. They then drove under three scenarios (urban intersection, expressway, construction zone) and six levels of AR-HUD visual information load. Generalized linear models were applied to the reaction time, fixation duration, and pupil diameter. The results revealed a significant three-way interaction among WMC, scenario, and information load. High-WMC drivers maintained faster responses and lower subjective loads up to Levels 4–6, adopting a deep processing strategy; low-WMC drivers already showed cognitive overload at Level 4 and above, requiring an optimal load range of Level 2–3. The construction zone induced the steepest increase in cognitive load, whereas the expressway markedly reduced sensitivity to additional visual information. Therefore, the optimal AR-HUD information load must be adapted to drivers’ WMC: high-WMC drivers can safely handle Levels 4–6 in low- or medium-demand scenarios, whereas low-WMC drivers require a minimalist presentation of Levels 2–3 in high-demand situations. This study provides quantitative, empirically grounded guidelines for designing cognitively adaptive AR-HUD interfaces.</p>
	]]></content:encoded>

	<dc:title>Effects of Driving Task Demands and Information Load on AR-HUD Cognitive Efficiency: The Moderating Role of Working Memory Capacity in a VR-Based Simulated Driving Environment</dc:title>
			<dc:creator>Jing Li</dc:creator>
			<dc:creator>Min Lin</dc:creator>
			<dc:creator>Xinyu Feng</dc:creator>
			<dc:creator>Hua Zhang</dc:creator>
			<dc:creator>Chuchu Wang</dc:creator>
			<dc:creator>Yulian Ma</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030048</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/jemr19030048</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/47">

	<title>JEMR, Vol. 19, Pages 47: Differential Fixation and Eye Alignment Patterns in Strabismus with and Without Amblyopia Across Viewing Conditions</title>
	<link>https://www.mdpi.com/1995-8692/19/3/47</link>
	<description>Fixation instability (FI) and vergence instability (VI) in amblyopia and strabismus are associated with disrupted physiologic fixation eye movements (FEMs). This study examined how viewing conditions affect FEM patterns in strabismic subjects with and without amblyopia. FEMs of the non-dominant/amblyopic and dominant/fellow eyes were recorded using video-oculography during both-eye viewing (BEV), fellow/dominant-eye viewing (FEV/DEV), and amblyopic/non-dominant-eye viewing (AEV/NDEV) in strabismic subjects with amblyopia (SA, n = 56), without amblyopia (S, n = 19), and controls (C, n = 25). FI, VI, fast FEM amplitudes, slow FEM velocities, and time-based control of eye deviation were analyzed. The SA group showed the greatest FI in the amblyopic eye during AEV compared with the fellow eye during FEV, whereas minimal inter-ocular FI differences were observed in the S group and controls. Under monocular viewing, both SA and S groups exhibited increased FI in the non-viewing eye and higher VI than controls. Regression analyses indicated that visual acuity deficits primarily influenced viewing-eye FI and FEM dynamics, while strabismus mainly affected non-viewing-eye FI and slow FEMs. C and S groups showed the least eye deviation during BEV, whereas the SA group showed the least eye deviation&amp;mdash;but the highest VI&amp;mdash;during AEV, indicating a distinct pattern of incomitance. Distinct FEM patterns shaped by viewing conditions may reflect underlying visuomotor control mechanisms and serve as biomarkers for AI (artificial intelligence)-based classification.</description>
	<pubDate>2026-05-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 47: Differential Fixation and Eye Alignment Patterns in Strabismus with and Without Amblyopia Across Viewing Conditions</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/47">doi: 10.3390/jemr19030047</a></p>
	<p>Authors:
		Archayeeta Rakshit
		Ibrahim M. Quagraine
		Gokce Busra Cakir
		Aasef G. Shaikh
		Fatema F. Ghasia
		</p>
	<p>Fixation instability (FI) and vergence instability (VI) in amblyopia and strabismus are associated with disrupted physiologic fixation eye movements (FEMs). This study examined how viewing conditions affect FEM patterns in strabismic subjects with and without amblyopia. FEMs of the non-dominant/amblyopic and dominant/fellow eyes were recorded using video-oculography during both-eye viewing (BEV), fellow/dominant-eye viewing (FEV/DEV), and amblyopic/non-dominant-eye viewing (AEV/NDEV) in strabismic subjects with amblyopia (SA, n = 56), without amblyopia (S, n = 19), and controls (C, n = 25). FI, VI, fast FEM amplitudes, slow FEM velocities, and time-based control of eye deviation were analyzed. The SA group showed the greatest FI in the amblyopic eye during AEV compared with the fellow eye during FEV, whereas minimal inter-ocular FI differences were observed in the S group and controls. Under monocular viewing, both SA and S groups exhibited increased FI in the non-viewing eye and higher VI than controls. Regression analyses indicated that visual acuity deficits primarily influenced viewing-eye FI and FEM dynamics, while strabismus mainly affected non-viewing-eye FI and slow FEMs. C and S groups showed the least eye deviation during BEV, whereas the SA group showed the least eye deviation&mdash;but the highest VI&mdash;during AEV, indicating a distinct pattern of incomitance. Distinct FEM patterns shaped by viewing conditions may reflect underlying visuomotor control mechanisms and serve as biomarkers for AI (artificial intelligence)-based classification.</p>
	]]></content:encoded>

	<dc:title>Differential Fixation and Eye Alignment Patterns in Strabismus with and Without Amblyopia Across Viewing Conditions</dc:title>
			<dc:creator>Archayeeta Rakshit</dc:creator>
			<dc:creator>Ibrahim M. Quagraine</dc:creator>
			<dc:creator>Gokce Busra Cakir</dc:creator>
			<dc:creator>Aasef G. Shaikh</dc:creator>
			<dc:creator>Fatema F. Ghasia</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030047</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-03</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-03</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/jemr19030047</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/46">

	<title>JEMR, Vol. 19, Pages 46: Oculometric Function More Strongly Predicts Working Memory than Stress in Military Officers</title>
	<link>https://www.mdpi.com/1995-8692/19/3/46</link>
	<description>Working memory, the capacity to store information for near-immediate use, and visual attention, the ability to focus on task-relevant information, are integral skills for military personnel. In civilian populations, stress is associated with worse skills. However, little is known about the relationship between stress, working memory, and visual attention in military officers, who are trained to handle acute stress and operate in high-stress environments. Thirty-three military officers completed a working memory test, a Perceived Stress Questionnaire (PSQ), and an oculometric assessment of visual tracking. The oculometric test was a modified step-ramp test that produces 10 z-scored metrics. Working memory and executive function were assessed via the n-back task. Oculometric performance and self-reported stress levels were independently associated with n-back accuracy, explaining 67% of the variance (adjusted R2, n = 30). The association between oculometric performance and n-back accuracy was driven by directional anisotropy, directional noise and proportion of smooth pursuit. The association between oculometric performance and stress was complicated by sex differences. Results have important implications for the assessment of cognitive readiness in military populations. The strong relationship between oculometric performance and working memory suggests that eye-tracking-based metrics may serve as candidate indicators of cognitive function under operational demands.</description>
	<pubDate>2026-05-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 46: Oculometric Function More Strongly Predicts Working Memory than Stress in Military Officers</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/46">doi: 10.3390/jemr19030046</a></p>
	<p>Authors:
		Mollie McGuire
		Neda Bahrani
		Quinn Kennedy
		Dorion Liston
		</p>
	<p>Working memory, the capacity to store information for near-immediate use, and visual attention, the ability to focus on task-relevant information, are integral skills for military personnel. In civilian populations, stress is associated with worse skills. However, little is known about the relationship between stress, working memory, and visual attention in military officers, who are trained to handle acute stress and operate in high-stress environments. Thirty-three military officers completed a working memory test, a Perceived Stress Questionnaire (PSQ), and an oculometric assessment of visual tracking. The oculometric test was a modified step-ramp test that produces 10 z-scored metrics. Working memory and executive function were assessed via the n-back task. Oculometric performance and self-reported stress levels were independently associated with n-back accuracy, explaining 67% of the variance (adjusted R2, n = 30). The association between oculometric performance and n-back accuracy was driven by directional anisotropy, directional noise and proportion of smooth pursuit. The association between oculometric performance and stress was complicated by sex differences. Results have important implications for the assessment of cognitive readiness in military populations. The strong relationship between oculometric performance and working memory suggests that eye-tracking-based metrics may serve as candidate indicators of cognitive function under operational demands.</p>
	]]></content:encoded>

	<dc:title>Oculometric Function More Strongly Predicts Working Memory than Stress in Military Officers</dc:title>
			<dc:creator>Mollie McGuire</dc:creator>
			<dc:creator>Neda Bahrani</dc:creator>
			<dc:creator>Quinn Kennedy</dc:creator>
			<dc:creator>Dorion Liston</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030046</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-05-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-05-02</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/jemr19030046</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/46</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
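	<!--
	A worked sketch of the adjusted R² statistic reported in the abstract above (67% of variance in n-back accuracy, n = 30). The data, predictor count, and variable names below are hypothetical illustrations, not the study's actual variables.

	import numpy as np

	# Hypothetical design: 30 officers, k = 4 predictors (e.g., an oculometric
	# composite, PSQ stress score, and two covariates); y is n-back accuracy.
	rng = np.random.default_rng(0)
	n, k = 30, 4
	X = rng.normal(size=(n, k))
	y = 0.6 * X[:, 0] + 0.3 * X[:, 1] + rng.normal(scale=0.5, size=n)

	# Ordinary least squares with an intercept column.
	A = np.column_stack([np.ones(n), X])
	beta, *_ = np.linalg.lstsq(A, y, rcond=None)
	resid = y - A @ beta

	# R² penalized for model size: adjusted R² = 1 - (1 - R²)(n - 1)/(n - k - 1).
	ss_res = float(np.sum(resid ** 2))
	ss_tot = float(np.sum((y - y.mean()) ** 2))
	r2 = 1 - ss_res / ss_tot
	r2_adj = 1 - (1 - r2) * (n - 1) / (n - k - 1)
	print(round(r2, 3), round(r2_adj, 3))
	-->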
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/45">

	<title>JEMR, Vol. 19, Pages 45: Effects of Cognitive, Simulator, and Real-World Training on Novice Driver Gaze Behaviour: A Pre–Post Study</title>
	<link>https://www.mdpi.com/1995-8692/19/3/45</link>
	<description>Novice drivers demonstrate inefficient visual scanning and elevated crash risk relative to experienced drivers. Different training programmes may influence gaze behaviour and performance in distinct ways. This study compared the impact of cognitive, simulator-based, and real-world training on visual attention and driving-related outcomes in novice drivers. Thirty novice drivers (18–27 years; ≤1 year driving experience) were randomized into three training groups (n = 10 each): cognitive training (PsyToolkit, Version 3.7.0), game-based simulator training, and supervised real-world driving. Baseline and post-training assessments included visuomotor performance (Fitts' Law), attentional cueing (valid/invalid reaction time), simulator-based driving errors, and eye-tracking measures of gaze behaviour. Eye-tracking outcomes included dwell-time percentage and first-fixation order across predefined areas of interest (AOIs). Participants completed 10 consecutive days of modality-specific training. Cognitive training improved visuomotor performance and increased forward road monitoring. Game-based simulator training yielded the largest reductions in simulator driving errors, particularly lane deviations (Z = −2.89, p = 0.004). Real-world driving altered visual scanning patterns, with significant differences in rear-view mirror prioritization (p = 0.024). Across groups, gaze shifted from dashboard view toward safety-relevant AOIs. Training modifies novice drivers' gaze behaviour in modality-specific ways, suggesting that a multimodal training approach may enhance visual attention and driving safety.</description>
	<pubDate>2026-04-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 45: Effects of Cognitive, Simulator, and Real-World Training on Novice Driver Gaze Behaviour: A Pre–Post Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/45">doi: 10.3390/jemr19030045</a></p>
	<p>Authors:
		Prem Sudhakar Lawrence
		Aiswaryah Radhakrishnan
		</p>
	<p>Novice drivers demonstrate inefficient visual scanning and elevated crash risk relative to experienced drivers. Different training programmes may influence gaze behaviour and performance in distinct ways. This study compared the impact of cognitive, simulator-based, and real-world training on visual attention and driving-related outcomes in novice drivers. Thirty novice drivers (18–27 years; ≤1 year driving experience) were randomized into three training groups (n = 10 each): cognitive training (PsyToolkit, Version 3.7.0), game-based simulator training, and supervised real-world driving. Baseline and post-training assessments included visuomotor performance (Fitts' Law), attentional cueing (valid/invalid reaction time), simulator-based driving errors, and eye-tracking measures of gaze behaviour. Eye-tracking outcomes included dwell-time percentage and first-fixation order across predefined areas of interest (AOIs). Participants completed 10 consecutive days of modality-specific training. Cognitive training improved visuomotor performance and increased forward road monitoring. Game-based simulator training yielded the largest reductions in simulator driving errors, particularly lane deviations (Z = −2.89, p = 0.004). Real-world driving altered visual scanning patterns, with significant differences in rear-view mirror prioritization (p = 0.024). Across groups, gaze shifted from dashboard view toward safety-relevant AOIs. Training modifies novice drivers' gaze behaviour in modality-specific ways, suggesting that a multimodal training approach may enhance visual attention and driving safety.</p>
	]]></content:encoded>

	<dc:title>Effects of Cognitive, Simulator, and Real-World Training on Novice Driver Gaze Behaviour: A Pre–Post Study</dc:title>
			<dc:creator>Prem Sudhakar Lawrence</dc:creator>
			<dc:creator>Aiswaryah Radhakrishnan</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030045</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-30</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-30</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/jemr19030045</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/45</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
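	<!--
	The abstract above assesses visuomotor performance via Fitts' Law. A minimal sketch of the standard Shannon formulation, MT = a + b * log2(D/W + 1); the intercept, slope, and target geometry below are hypothetical values, not the study's fitted parameters.

	from math import log2

	def index_of_difficulty(d: float, w: float) -> float:
	    """Shannon formulation of Fitts' index of difficulty, in bits."""
	    return log2(d / w + 1)

	def movement_time(a: float, b: float, d: float, w: float) -> float:
	    """Fitts' Law: MT = a + b * ID, with a and b fitted per participant."""
	    return a + b * index_of_difficulty(d, w)

	# Illustrative pre- vs. post-training slopes for a 160 px reach to a 20 px target.
	print(movement_time(a=0.20, b=0.12, d=160, w=20))  # pre:  ~0.58 s
	print(movement_time(a=0.20, b=0.09, d=160, w=20))  # post: ~0.49 s
	-->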
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/44">

	<title>JEMR, Vol. 19, Pages 44: Validating Temporal Eye Tracking Metrics as Orthogonal Biomarkers for Aggressive Traits: A Mixed-Effects Analysis</title>
	<link>https://www.mdpi.com/1995-8692/19/3/44</link>
	<description>Atypical visual attention to aversive or threatening stimuli is a clinically relevant feature of aggressive behavior. However, the developmental dissociation between sustained visual allocation and early orienting remains unclear. This study examined the temporal dynamics of visual attentional biases in a sample of 119 children and adolescents (51 males, 68 females), clinically and behaviorally categorized into aggressive and non-aggressive cohorts. Using a free-viewing paradigm with standardized emotional stimulus pairs selected from the International Affective Picture System (IAPS), eye-tracking analysis focused on first-fixation direction and dwell time. Inferential analyses were conducted using Linear Mixed-Effects Models (LMM) and Generalized Linear Mixed-Effects Models (GLMM). The linear model revealed a significant main effect of behavioral condition: individuals with aggressive traits, regardless of their stage of development, showed greater sustained visual allocation toward negative stimuli. In contrast, the GLMM for first-fixation direction identified a significant age-by-condition interaction, indicating that early orienting differences were more clearly expressed in the aggressive adolescent cohort. These findings suggest that sustained visual preference for negative content may represent a relatively stable correlate of aggressive traits, whereas early orienting differences may vary across developmental stages. Together, these two temporal eye-tracking measures may provide complementary information for future computational approaches to aggression screening. In conclusion, these two temporal oculomotor dimensions may provide a useful feature space for future machine-learning pipelines and may serve as complementary candidate markers for comparing computational predictions against clinically established ground truth in aggression screening research.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 44: Validating Temporal Eye Tracking Metrics as Orthogonal Biomarkers for Aggressive Traits: A Mixed-Effects Analysis</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/44">doi: 10.3390/jemr19030044</a></p>
	<p>Authors:
		Omar Alvarado-Cando
		Oscar Casanova-Carvajal
		José-Javier Serrano-Olmedo
		</p>
	<p>Atypical visual attention to aversive or threatening stimuli is a clinically relevant feature of aggressive behavior. However, the developmental dissociation between sustained visual allocation and early orienting remains unclear. This study examined the temporal dynamics of visual attentional biases in a sample of 119 children and adolescents (51 males, 68 females), clinically and behaviorally categorized into aggressive and non-aggressive cohorts. Using a free-viewing paradigm with standardized emotional stimulus pairs selected from the International Affective Picture System (IAPS), eye-tracking analysis focused on first-fixation direction and dwell time. Inferential analyses were conducted using Linear Mixed-Effects Models (LMM) and Generalized Linear Mixed-Effects Models (GLMM). The linear model revealed a significant main effect of behavioral condition: individuals with aggressive traits, regardless of their stage of development, showed greater sustained visual allocation toward negative stimuli. In contrast, the GLMM for first-fixation direction identified a significant age-by-condition interaction, indicating that early orienting differences were more clearly expressed in the aggressive adolescent cohort. These findings suggest that sustained visual preference for negative content may represent a relatively stable correlate of aggressive traits, whereas early orienting differences may vary across developmental stages. Together, these two temporal eye-tracking measures may provide complementary information for future computational approaches to aggression screening. In conclusion, these two temporal oculomotor dimensions may provide a useful feature space for future machine-learning pipelines and may serve as complementary candidate markers for comparing computational predictions against clinically established ground truth in aggression screening research.</p>
	]]></content:encoded>

	<dc:title>Validating Temporal Eye Tracking Metrics as Orthogonal Biomarkers for Aggressive Traits: A Mixed-Effects Analysis</dc:title>
			<dc:creator>Omar Alvarado-Cando</dc:creator>
			<dc:creator>Oscar Casanova-Carvajal</dc:creator>
			<dc:creator>José-Javier Serrano-Olmedo</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030044</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/jemr19030044</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/44</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
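	<!--
	A minimal sketch of the kind of linear mixed-effects model the abstract describes for dwell time, with a random intercept per participant. The synthetic data, column names, and effect sizes below are assumptions for illustration, not the authors' dataset or model specification.

	import numpy as np
	import pandas as pd
	import statsmodels.formula.api as smf

	# Hypothetical long-format data: one row per trial.
	rng = np.random.default_rng(0)
	n = 600
	df = pd.DataFrame({
	    "participant_id": np.repeat(np.arange(30), 20),
	    "group": np.repeat(rng.choice(["aggressive", "control"], 30), 20),
	    "valence": np.tile(["negative", "neutral"], 300),
	    "age": np.repeat(rng.integers(8, 18, 30), 20),
	})
	df["dwell_time"] = (300
	    + 40 * ((df.group == "aggressive") & (df.valence == "negative"))
	    + rng.normal(0, 30, n))

	# Fixed effects for group, valence, and age; random intercept per participant.
	model = smf.mixedlm("dwell_time ~ group * valence + age",
	                    data=df, groups=df["participant_id"])
	print(model.fit().summary())
	-->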
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/43">

	<title>JEMR, Vol. 19, Pages 43: Reversible Orbital Apex Syndrome</title>
	<link>https://www.mdpi.com/1995-8692/19/3/43</link>
	<description>Orbital apex syndrome (OAS) is characterized by optic neuropathy and ophthalmoplegia and is generally associated with poor visual prognosis. The aim of this study was to describe patients with acute OAS who demonstrated substantial recovery of visual function and ocular motility. We retrospectively reviewed the medical records of patients treated for OAS at a tertiary medical center between 2019 and 2024 whose condition ultimately proved reversible. Data on demographics, clinical findings, imaging, management, and follow-up were collected. Six patients (three female, three male; age range 14–87 years) were included and followed for a median of 7 months (range 2–31). All presented with reduced vision and ophthalmoplegia of varying severity. Underlying etiologies included inflammatory disease (n = 2), lymphoma, infection, blunt trauma, and post-surgical OAS of undetermined etiology (n = 1 each). Treatment was directed at the underlying cause. Visual acuity ranged from 20/30 to hand motion (HM) at presentation and 20/15 to 20/60 at the final visit. Vision and ocular motility improved after a median of 2.37 months (range 0.25–5 months). Near-complete recovery of ocular motility was observed in all patients, with only one retaining mild abduction limitation. These findings highlight a subset of OAS cases with favorable outcomes and emphasize the importance of early diagnosis and etiology-directed management.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 43: Reversible Orbital Apex Syndrome</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/43">doi: 10.3390/jemr19030043</a></p>
	<p>Authors:
		Yakov Rabinovich
		Inbal Man Peles
		Zina Almer
		Iris Ben Bassat-Mizrachi
		Jonathan Sapir
		Noa Hadar
		Alon Zahavi
		Nitza Goldenberg-Cohen
		</p>
	<p>Orbital apex syndrome (OAS) is characterized by optic neuropathy and ophthalmoplegia and is generally associated with poor visual prognosis. The aim of this study was to describe patients with acute OAS who demonstrated substantial recovery of visual function and ocular motility. We retrospectively reviewed the medical records of patients treated for OAS at a tertiary medical center between 2019 and 2024 whose condition ultimately proved reversible. Data on demographics, clinical findings, imaging, management, and follow-up were collected. Six patients (three female, three male; age range 14–87 years) were included and followed for a median of 7 months (range 2–31). All presented with reduced vision and ophthalmoplegia of varying severity. Underlying etiologies included inflammatory disease (n = 2), lymphoma, infection, blunt trauma, and post-surgical OAS of undetermined etiology (n = 1 each). Treatment was directed at the underlying cause. Visual acuity ranged from 20/30 to hand motion (HM) at presentation and 20/15 to 20/60 at the final visit. Vision and ocular motility improved after a median of 2.37 months (range 0.25–5 months). Near-complete recovery of ocular motility was observed in all patients, with only one retaining mild abduction limitation. These findings highlight a subset of OAS cases with favorable outcomes and emphasize the importance of early diagnosis and etiology-directed management.</p>
	]]></content:encoded>

	<dc:title>Reversible Orbital Apex Syndrome</dc:title>
			<dc:creator>Yakov Rabinovich</dc:creator>
			<dc:creator>Inbal Man Peles</dc:creator>
			<dc:creator>Zina Almer</dc:creator>
			<dc:creator>Iris Ben Bassat-Mizrachi</dc:creator>
			<dc:creator>Jonathan Sapir</dc:creator>
			<dc:creator>Noa Hadar</dc:creator>
			<dc:creator>Alon Zahavi</dc:creator>
			<dc:creator>Nitza Goldenberg-Cohen</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030043</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/jemr19030043</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/43</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/3/42">

	<title>JEMR, Vol. 19, Pages 42: Eye-Tracking-Based Interventions for School-Age Specific Learning Disorders: A Narrative Review of Functional Assessment and Gaze-Contingent Training</title>
	<link>https://www.mdpi.com/1995-8692/19/3/42</link>
	<description>Eye tracking (ET) provides process-level indices of how students sample task-relevant information during core academic activities. In school-age learners (6–18 years) with specific learning disorders (SLDs; dyslexia, dysgraphia, and dyscalculia), ET can complement behavioural assessment by quantifying oculomotor patterns linked to decoding, model–production coordination, and stepwise strategy execution. This narrative review synthesises ET findings in SLD across reading, handwriting/copying, and arithmetic and translates them into an applied framework for school-oriented use. We summarise key metrics and Areas of Interest (AOI)-based analyses, highlight technical and data-quality requirements for valid acquisition in educational settings, and outline compact functional assessment protocols integrated with standard academic and neuropsychological measures. Building on these foundations, we propose six hypothesis-driven gaze-contingent paradigms (H1–H6) as preliminary models for future experimental testing rather than as established interventions, and we map each to its current level of empirical support, specifying primary gaze outcomes and curriculum-relevant behavioural endpoints. We emphasise that eye-movement findings in specific learning disorders are heterogeneous and may vary as a function of age, task demands, and comorbidity. Accordingly, credible training effects require retention and transfer probes under standard, non-contingent display conditions, appropriate controls, and explicit developmental interpretation. Eye tracking is positioned as complementary functional evidence and as a platform for experimentally testable, mechanism-based interventions in school-age specific learning disorders.</description>
	<pubDate>2026-04-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 42: Eye-Tracking-Based Interventions for School-Age Specific Learning Disorders: A Narrative Review of Functional Assessment and Gaze-Contingent Training</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/3/42">doi: 10.3390/jemr19030042</a></p>
	<p>Authors:
		Pierluigi Diotaiuti
		Francesco Di Siena
		Salvatore Vitiello
		Alessandra Zanon
		Pio Alfredo Di Tore
		Stefania Mancone
		</p>
	<p>Eye tracking (ET) provides process-level indices of how students sample task-relevant information during core academic activities. In school-age learners (6–18 years) with specific learning disorders (SLDs; dyslexia, dysgraphia, and dyscalculia), ET can complement behavioural assessment by quantifying oculomotor patterns linked to decoding, model–production coordination, and stepwise strategy execution. This narrative review synthesises ET findings in SLD across reading, handwriting/copying, and arithmetic and translates them into an applied framework for school-oriented use. We summarise key metrics and Areas of Interest (AOI)-based analyses, highlight technical and data-quality requirements for valid acquisition in educational settings, and outline compact functional assessment protocols integrated with standard academic and neuropsychological measures. Building on these foundations, we propose six hypothesis-driven gaze-contingent paradigms (H1–H6) as preliminary models for future experimental testing rather than as established interventions, and we map each to its current level of empirical support, specifying primary gaze outcomes and curriculum-relevant behavioural endpoints. We emphasise that eye-movement findings in specific learning disorders are heterogeneous and may vary as a function of age, task demands, and comorbidity. Accordingly, credible training effects require retention and transfer probes under standard, non-contingent display conditions, appropriate controls, and explicit developmental interpretation. Eye tracking is positioned as complementary functional evidence and as a platform for experimentally testable, mechanism-based interventions in school-age specific learning disorders.</p>
	]]></content:encoded>

	<dc:title>Eye-Tracking-Based Interventions for School-Age Specific Learning Disorders: A Narrative Review of Functional Assessment and Gaze-Contingent Training</dc:title>
			<dc:creator>Pierluigi Diotaiuti</dc:creator>
			<dc:creator>Francesco Di Siena</dc:creator>
			<dc:creator>Salvatore Vitiello</dc:creator>
			<dc:creator>Alessandra Zanon</dc:creator>
			<dc:creator>Pio Alfredo Di Tore</dc:creator>
			<dc:creator>Stefania Mancone</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19030042</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-24</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-24</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/jemr19030042</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/3/42</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
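	<!--
	Among the AOI-based metrics the review surveys, dwell-time percentage is the most common. A minimal sketch under assumed inputs: each fixation is an (AOI label, duration) pair, with None marking fixations outside every AOI; field names are illustrative, not the review's notation.

	from collections import defaultdict

	def dwell_time_percentages(fixations):
	    """Percentage of total viewing time spent in each AOI."""
	    totals, grand = defaultdict(float), 0.0
	    for aoi, duration_ms in fixations:
	        grand += duration_ms
	        if aoi is not None:
	            totals[aoi] += duration_ms
	    return {aoi: 100.0 * t / grand for aoi, t in totals.items()}

	fixations = [("word", 220), ("word", 180), ("illustration", 300), (None, 100)]
	print(dwell_time_percentages(fixations))  # {'word': 50.0, 'illustration': 37.5}
	-->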
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/41">

	<title>JEMR, Vol. 19, Pages 41: Data-Driven Insights into E-Learning: A Comprehensive Review of Eye-Tracking Applications in Learning Systems</title>
	<link>https://www.mdpi.com/1995-8692/19/2/41</link>
	<description>In the last few years, universities have increasingly implemented online learning environments, allowing students to study at their own pace. These environments utilize technological tools and implement methods to support training, deliver content, and promote the acquisition of new knowledge and skills. As an example of these technologies, eye tracking has emerged as a powerful tool for studying visual attention, cognitive processes, and learning behaviors. The main aim of this study is to provide a scoping review of recent eye-tracking research across diverse learner populations, ranging from K-12 students to university-level learners and educators. The present study examined recent advances in eye-tracking technologies, focusing on their potential, especially when combined with artificial intelligence (AI) techniques such as machine learning. It analyzed 54 empirical studies published in recent years, highlighting their applicability, strengths, and limitations. The research findings highlight the promise of eye-tracking technology to transform educational practices by providing data-driven insights regarding student behavior and cognitive processes. Future research must address implementation and data-analysis challenges to maximize the educational benefits of eye tracking.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 41: Data-Driven Insights into E-Learning: A Comprehensive Review of Eye-Tracking Applications in Learning Systems</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/41">doi: 10.3390/jemr19020041</a></p>
	<p>Authors:
		Safia Bendjebar
		Yacine Lafifi
		Rochdi Boudjehem
		Aissa Laouissi
		</p>
	<p>In the last few years, universities have increasingly implemented online learning environments, allowing students to study at their own pace. These environments utilize technological tools and implement methods to support training, deliver content, and promote the acquisition of new knowledge and skills. As an example of these technologies, eye tracking has emerged as a powerful tool for studying visual attention, cognitive processes, and learning behaviors. The main aim of this study is to provide a scoping review of recent eye-tracking research across diverse learner populations, ranging from K-12 students to university-level learners and educators. The present study examined recent advances in eye-tracking technologies, focusing on their potential, especially when combined with artificial intelligence (AI) techniques such as machine learning. It analyzed 54 empirical studies published in recent years, highlighting their applicability, strengths, and limitations. The research findings highlight the promise of eye-tracking technology to transform educational practices by providing data-driven insights regarding student behavior and cognitive processes. Future research must address implementation and data-analysis challenges to maximize the educational benefits of eye tracking.</p>
	]]></content:encoded>

	<dc:title>Data-Driven Insights into E-Learning: A Comprehensive Review of Eye-Tracking Applications in Learning Systems</dc:title>
			<dc:creator>Safia Bendjebar</dc:creator>
			<dc:creator>Yacine Lafifi</dc:creator>
			<dc:creator>Rochdi Boudjehem</dc:creator>
			<dc:creator>Aissa Laouissi</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020041</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/jemr19020041</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/41</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/40">

	<title>JEMR, Vol. 19, Pages 40: Application of Eye-Tracking Technology in Assessing Binocular Vision Function in Paediatric Populations: A Scoping Review</title>
	<link>https://www.mdpi.com/1995-8692/19/2/40</link>
	<description>Background: This review discusses the application of eye-tracking technology in the detection and monitoring of binocular vision anomalies among children. Methods: A scoping review using PRISMA guidelines was conducted through Scopus, ScienceDirect, and PubMed using the keywords “eye-tracking,” “binocular,” “vision,” “anomalies,” “paediatrics,” and “children” from 2015 to 2025. Studies were excluded if they were not written in English, did not use an eye tracker as a research tool, involved an ineligible population, or involved non-human subjects. Results: The search strategy identified 77 citations, yet only 14 studies met the inclusion criteria. This review revealed a variety of binocular vision anomalies detectable through eye-tracking systems, along with the specific models and parameters employed in these assessments. Application of eye-tracking technology in diagnosing conditions such as strabismus and amblyopia demonstrated potential for improved accuracy and early detection. Discussion: Eye-tracking technology demonstrates considerable potential for the detection and monitoring of binocular vision anomalies in children, particularly as a non-invasive method for early screening, thereby strengthening its clinical applicability. By assessing fixation stability, saccadic movements, and vergence responses, eye-tracking allows for the early detection of subtle visual anomalies, especially in the paediatric population. Conclusions: Eye-tracking technology represents a valuable advancement in paediatric vision care, enabling the more objective and earlier detection of binocular vision anomalies in the paediatric population.</description>
	<pubDate>2026-04-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 40: Application of Eye-Tracking Technology in Assessing Binocular Vision Function in Paediatric Populations: A Scoping Review</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/40">doi: 10.3390/jemr19020040</a></p>
	<p>Authors:
		Ong Koon
		Noor Badarudin
		Byoung-Sun Chu
		</p>
	<p>Background: This review discusses the application of eye-tracking technology in the detection and monitoring of binocular vision anomalies among children. Methods: A scoping review using PRISMA guidelines was conducted through Scopus, ScienceDirect, and PubMed using the keywords “eye-tracking,” “binocular,” “vision,” “anomalies,” “paediatrics,” and “children” from 2015 to 2025. Studies were excluded if they were not written in English, did not use an eye tracker as a research tool, involved an ineligible population, or involved non-human subjects. Results: The search strategy identified 77 citations, yet only 14 studies met the inclusion criteria. This review revealed a variety of binocular vision anomalies detectable through eye-tracking systems, along with the specific models and parameters employed in these assessments. Application of eye-tracking technology in diagnosing conditions such as strabismus and amblyopia demonstrated potential for improved accuracy and early detection. Discussion: Eye-tracking technology demonstrates considerable potential for the detection and monitoring of binocular vision anomalies in children, particularly as a non-invasive method for early screening, thereby strengthening its clinical applicability. By assessing fixation stability, saccadic movements, and vergence responses, eye-tracking allows for the early detection of subtle visual anomalies, especially in the paediatric population. Conclusions: Eye-tracking technology represents a valuable advancement in paediatric vision care, enabling the more objective and earlier detection of binocular vision anomalies in the paediatric population.</p>
	]]></content:encoded>

	<dc:title>Application of Eye-Tracking Technology in Assessing Binocular Vision Function in Paediatric Populations: A Scoping Review</dc:title>
			<dc:creator>Ong Koon</dc:creator>
			<dc:creator>Noor Badarudin</dc:creator>
			<dc:creator>Byoung-Sun Chu</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020040</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-17</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-17</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/jemr19020040</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/40</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/39">

	<title>JEMR, Vol. 19, Pages 39: Virtual Reality Orthoptic Interventions for Binocular Vision Disorders: A Systematic Review and Meta-Analysis</title>
	<link>https://www.mdpi.com/1995-8692/19/2/39</link>
	<description>Purpose: To systematically review and meta-analyze randomized controlled trials (RCTs) evaluating digital orthoptic interventions, including virtual reality (VR)–based approaches, for convergence insufficiency and intermittent exotropia. Methods: This systematic review and meta-analysis followed PRISMA 2020 guidelines and AMSTAR-2 standards and was prospectively registered in PROSPERO. PubMed, Web of Science, and Scopus were searched up to December 2025. Eligible studies were RCTs comparing VR-based or digital orthoptic interventions with conventional therapy, placebo VR, or control conditions. Primary outcomes included near point of convergence, ocular deviation, fusional reserves, and stereopsis. Risk of bias was assessed using RoB 2 and certainty of evidence with GRADE. Results: Four RCTs (184 participants) were included. In convergence insufficiency, digital orthoptic interventions, including VR-based approaches, significantly reduced near heterophoria (mean difference [MD] −1.64 prism diopters; 95% CI −3.17 to −0.12), with no significant effects on near point of convergence or positive fusional reserves. In intermittent exotropia, VR-based interventions significantly improved near point of convergence (MD −1.60 cm; 95% CI −2.64 to −0.55), although this change did not reach the ≥4 cm threshold considered clinically meaningful according to the Convergence Insufficiency Treatment Trial. Improvements were also observed in stereopsis (MD −0.19 log units; 95% CI −0.33 to −0.04), while changes in near deviation were not significant. Evidence certainty ranged from low to moderate. Conclusions: VR-based and digital orthoptic interventions may offer modest, outcome-specific benefits as adjunctive treatments for selected binocular vision disorders. Larger, well-designed RCTs with standardized outcomes are needed.</description>
	<pubDate>2026-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 39: Virtual Reality Orthoptic Interventions for Binocular Vision Disorders: A Systematic Review and Meta-Analysis</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/39">doi: 10.3390/jemr19020039</a></p>
	<p>Authors:
		Clara Martinez-Perez
		Noelia Nores-Palmas
		Jacobo Garcia-Queiruga
		Maria J. Giraldez
		Eva Yebra-Pimentel
		</p>
	<p>Purpose: To systematically review and meta-analyze randomized controlled trials (RCTs) evaluating digital orthoptic interventions, including virtual reality (VR)–based approaches, for convergence insufficiency and intermittent exotropia. Methods: This systematic review and meta-analysis followed PRISMA 2020 guidelines and AMSTAR-2 standards and was prospectively registered in PROSPERO. PubMed, Web of Science, and Scopus were searched up to December 2025. Eligible studies were RCTs comparing VR-based or digital orthoptic interventions with conventional therapy, placebo VR, or control conditions. Primary outcomes included near point of convergence, ocular deviation, fusional reserves, and stereopsis. Risk of bias was assessed using RoB 2 and certainty of evidence with GRADE. Results: Four RCTs (184 participants) were included. In convergence insufficiency, digital orthoptic interventions, including VR-based approaches, significantly reduced near heterophoria (mean difference [MD] −1.64 prism diopters; 95% CI −3.17 to −0.12), with no significant effects on near point of convergence or positive fusional reserves. In intermittent exotropia, VR-based interventions significantly improved near point of convergence (MD −1.60 cm; 95% CI −2.64 to −0.55), although this change did not reach the ≥4 cm threshold considered clinically meaningful according to the Convergence Insufficiency Treatment Trial. Improvements were also observed in stereopsis (MD −0.19 log units; 95% CI −0.33 to −0.04), while changes in near deviation were not significant. Evidence certainty ranged from low to moderate. Conclusions: VR-based and digital orthoptic interventions may offer modest, outcome-specific benefits as adjunctive treatments for selected binocular vision disorders. Larger, well-designed RCTs with standardized outcomes are needed.</p>
	]]></content:encoded>

	<dc:title>Virtual Reality Orthoptic Interventions for Binocular Vision Disorders: A Systematic Review and Meta-Analysis</dc:title>
			<dc:creator>Clara Martinez-Perez</dc:creator>
			<dc:creator>Noelia Nores-Palmas</dc:creator>
			<dc:creator>Jacobo Garcia-Queiruga</dc:creator>
			<dc:creator>Maria J. Giraldez</dc:creator>
			<dc:creator>Eva Yebra-Pimentel</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020039</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-14</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-14</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Systematic Review</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/jemr19020039</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/39</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
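	<!--
	A minimal sketch of the inverse-variance pooling behind mean differences such as MD −1.64 (95% CI −3.17 to −0.12) in the abstract above. The per-study inputs below are invented for illustration; the review's actual study-level data are not reproduced here, and a fixed-effect model is assumed for simplicity.

	import numpy as np

	def pooled_mean_difference(mds, ses):
	    """Fixed-effect inverse-variance pooling of per-study mean differences."""
	    w = 1.0 / np.asarray(ses, dtype=float) ** 2
	    md = float(np.sum(w * np.asarray(mds, dtype=float)) / np.sum(w))
	    se = float(np.sqrt(1.0 / np.sum(w)))
	    return md, (md - 1.96 * se, md + 1.96 * se)

	md, ci = pooled_mean_difference(mds=[-1.2, -2.0, -1.7], ses=[0.9, 1.1, 0.8])
	print(round(md, 2), tuple(round(c, 2) for c in ci))
	-->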
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/38">

	<title>JEMR, Vol. 19, Pages 38: Eye-Tracked Visual Attention to Anthropomorphic Appearance and Empathic Responses in AI Medical Conversational Agents: Dissociating Trust Gains from Attentional Synergy</title>
	<link>https://www.mdpi.com/1995-8692/19/2/38</link>
	<description>Understanding how users perceive and attend to the anthropomorphic appearance and empathic responses of artificial intelligence medical conversational agents (AIMCAs) can help reveal the key judgment cues underlying trust formation and use decisions, while also informing interface and dialog design. To this end, this study employs a 3 (appearance anthropomorphism: high, medium, low) × 2 (empathic response: present, absent) within-subject eye-tracking experiment, combined with subjective scales and brief post-task open-ended feedback. During a static prototype viewing task based on hypothetical consultation scenarios, we concurrently recorded trust, behavioral intention, and visual measures for key areas of interest (AOIs; appearance area, conversational content area, and overall interface area). Eye-tracking measures were normalized by AOI coverage proportion to improve cross-AOI comparability. The results show that both anthropomorphic appearance and empathic response significantly increased users' trust in AIMCAs and their behavioral intention. An interaction between these two types of social cues was also observed, suggesting that when visual embodiment and linguistic style are aligned at the social level, users are more likely to form favorable overall judgments. At the level of visual processing, however, no interaction effect was found, and the eye-tracking measures showed only partial main effects, indicating that subjective synergy does not necessarily correspond to synergistic changes in attentional allocation. Overall, anthropomorphic appearance and empathic response exerted consistent facilitating effects on outcome variables, but displayed different patterns of attentional allocation and information prioritization at the visual level. Accordingly, AIMCA design should emphasize consistency between appearance cues and conversational strategies, optimize users' initial judgments and interface comprehension, and support use intention through verifiable information organization and clear boundary cues.</description>
	<pubDate>2026-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 38: Eye-Tracked Visual Attention to Anthropomorphic Appearance and Empathic Responses in AI Medical Conversational Agents: Dissociating Trust Gains from Attentional Synergy</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/38">doi: 10.3390/jemr19020038</a></p>
	<p>Authors:
		Wumin Ouyang
		Hemin Du
		Yong Han
		Zihuan Wang
		Yuyu He
		</p>
	<p>Understanding how users perceive and attend to the anthropomorphic appearance and empathic responses of artificial intelligence medical conversational agents (AIMCAs) can help reveal the key judgment cues underlying trust formation and use decisions, while also informing interface and dialog design. To this end, this study employs a 3 (appearance anthropomorphism: high, medium, low) × 2 (empathic response: present, absent) within-subject eye-tracking experiment, combined with subjective scales and brief post-task open-ended feedback. During a static prototype viewing task based on hypothetical consultation scenarios, we concurrently recorded trust, behavioral intention, and visual measures for key areas of interest (AOIs; appearance area, conversational content area, and overall interface area). Eye-tracking measures were normalized by AOI coverage proportion to improve cross-AOI comparability. The results show that both anthropomorphic appearance and empathic response significantly increased users' trust in AIMCAs and their behavioral intention. An interaction between these two types of social cues was also observed, suggesting that when visual embodiment and linguistic style are aligned at the social level, users are more likely to form favorable overall judgments. At the level of visual processing, however, no interaction effect was found, and the eye-tracking measures showed only partial main effects, indicating that subjective synergy does not necessarily correspond to synergistic changes in attentional allocation. Overall, anthropomorphic appearance and empathic response exerted consistent facilitating effects on outcome variables, but displayed different patterns of attentional allocation and information prioritization at the visual level. Accordingly, AIMCA design should emphasize consistency between appearance cues and conversational strategies, optimize users' initial judgments and interface comprehension, and support use intention through verifiable information organization and clear boundary cues.</p>
	]]></content:encoded>

	<dc:title>Eye-Tracked Visual Attention to Anthropomorphic Appearance and Empathic Responses in AI Medical Conversational Agents: Dissociating Trust Gains from Attentional Synergy</dc:title>
			<dc:creator>Wumin Ouyang</dc:creator>
			<dc:creator>Hemin Du</dc:creator>
			<dc:creator>Yong Han</dc:creator>
			<dc:creator>Zihuan Wang</dc:creator>
			<dc:creator>Yuyu He</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020038</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-09</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-09</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/jemr19020038</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/38</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
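	<!--
	The abstract notes that eye-tracking measures were normalized by AOI coverage proportion. One common way to implement this (an assumption, not necessarily the authors' exact procedure) is to divide each AOI's dwell-time share by its share of screen area, so values above 1 indicate more attention than AOI size alone would predict. AOI names and pixel counts below are hypothetical.

	def normalized_dwell(dwell_ms: dict, aoi_area_px: dict, screen_area_px: int) -> dict:
	    """Dwell-time share per AOI divided by the AOI's area share."""
	    total_dwell = sum(dwell_ms.values())
	    return {
	        aoi: (dwell_ms[aoi] / total_dwell) / (aoi_area_px[aoi] / screen_area_px)
	        for aoi in dwell_ms
	    }

	# Hypothetical AOIs on a 1920x1080 prototype screen.
	print(normalized_dwell(
	    dwell_ms={"appearance": 1200, "conversation": 2600, "rest": 1400},
	    aoi_area_px={"appearance": 150_000, "conversation": 400_000, "rest": 1_523_600},
	    screen_area_px=1920 * 1080,
	))
	-->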
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/37">

	<title>JEMR, Vol. 19, Pages 37: Simplifying the Diagnosis of Vertical Diplopia: Is It Skew or Not?</title>
	<link>https://www.mdpi.com/1995-8692/19/2/37</link>
	<description>Ocular tilt reaction (OTR) and trochlear nerve palsy (TNP) can induce cyclotorsion. We aimed to assess the utility of fundus photography in distinguishing between these disorders. The database of a neuro-ophthalmology hospital-based clinic was retrospectively searched for patients referred for new-onset vertical diplopia between 2020 and 2023. Medical data were collected, and the angle between the optic disc and fovea was measured using ImageJ software to quantify torsion. Distinct torsional patterns were identified between the groups. OTR was characterized by variable, often conjugate torsion, whereas TNP demonstrated consistent disconjugate extorsion. Analysis of interocular torsional relationships, rather than magnitude alone, provided useful diagnostic discrimination. Fundus photography may be useful for differentiating OTR from TNP in complicated neurological cases, particularly in patients who are difficult to examine. This study emphasizes the practical clinical value of fundus photography as a simple, accessible, and objective tool for differentiating OTR from TNP, by contributing the torsional component of the OTR triad, particularly in emergency or diagnostically challenging settings where standard examination may be limited.</description>
	<pubDate>2026-04-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 37: Simplifying the Diagnosis of Vertical Diplopia: Is It Skew or Not?</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/37">doi: 10.3390/jemr19020037</a></p>
	<p>Authors:
		Anas Igbariye
		Noa Hadar
		Basel Obied
		Adi Berco
		Alon Zahavi
		Inbal Man Peles
		Nitza Goldenberg-Cohen
		</p>
	<p>Ocular tilt reaction (OTR) and trochlear nerve palsy (TNP) can induce cyclotorsion. We aimed to assess the utility of fundus photography in distinguishing between these disorders. The database of a neuro-ophthalmology hospital-based clinic was retrospectively searched for patients referred for new-onset vertical diplopia between 2020 and 2023. Medical data were collected, and the angle between the optic disc and fovea was measured using ImageJ software to quantify torsion. Distinct torsional patterns were identified between the groups. OTR was characterized by variable, often conjugate torsion, whereas TNP demonstrated consistent disconjugate extorsion. Analysis of interocular torsional relationships, rather than magnitude alone, provided useful diagnostic discrimination. Fundus photography may be useful for differentiating OTR from TNP in complicated neurological cases, particularly in patients who are difficult to examine. This study emphasizes the practical clinical value of fundus photography as a simple, accessible, and objective tool for differentiating OTR from TNP, by contributing the torsional component of the OTR triad, particularly in emergency or diagnostically challenging settings where standard examination may be limited.</p>
	]]></content:encoded>

	<dc:title>Simplifying the Diagnosis of Vertical Diplopia: Is It Skew or Not?</dc:title>
			<dc:creator>Anas Igbariye</dc:creator>
			<dc:creator>Noa Hadar</dc:creator>
			<dc:creator>Basel Obied</dc:creator>
			<dc:creator>Adi Berco</dc:creator>
			<dc:creator>Alon Zahavi</dc:creator>
			<dc:creator>Inbal Man Peles</dc:creator>
			<dc:creator>Nitza Goldenberg-Cohen</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020037</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-08</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-08</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/jemr19020037</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/37</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
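	<!--
	The abstract quantifies torsion as the angle between the optic disc and the fovea, measured in ImageJ. The same angle follows directly from the two landmark coordinates; the pixel coordinates and sign convention below are illustrative assumptions, not the study's measurements.

	from math import atan2, degrees

	def disc_fovea_angle(disc_xy, fovea_xy):
	    """Angle (degrees) of the disc-to-fovea line relative to horizontal."""
	    dx = fovea_xy[0] - disc_xy[0]
	    dy = fovea_xy[1] - disc_xy[1]  # image y increases downward
	    return degrees(atan2(dy, dx))

	print(disc_fovea_angle(disc_xy=(512, 480), fovea_xy=(780, 515)))  # ~7.4
	-->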
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/36">

	<title>JEMR, Vol. 19, Pages 36: An Exploratory Study of the Relationship Between Phoria, Oculomotor Skills and Visual Symptoms in Children Aged 5 to 8 Years</title>
	<link>https://www.mdpi.com/1995-8692/19/2/36</link>
	<description>Purpose: To investigate the relationship between oculomotor skills, phorias, and visual symptoms in a pediatric population aged 5 to 8 years. Methods: A cross-sectional study was conducted with 120 children, divided into three age groups. Each participant underwent a full optometric examination, including the Maddox test for dissociated phoria, and the Northeastern State University College of Optometry (NSUCO) and Developmental Eye Movement (DEM) tests for oculomotor function. In addition, the Convergence Insufficiency Symptom Survey (CISS V-15) questionnaire was administered to assess visual symptoms. Results: The prevalence of binocular and oculomotor dysfunctions varied by age and sex. Differences in saccadic and pursuit eye movement performance were observed between groups. Older children showed patterns of association between phoria measurements, oculomotor performance, and possible visual symptoms, particularly in girls over 6 years of age. Conclusions: This study provides additional descriptive data for the pediatric population and highlights that oculomotor dysfunction and phoria frequently coexist. Symptom scores measured by the CISS V-15 tended to increase with age. The results should be considered preliminary and potentially hypothesis-generating, pending the future availability of a validated questionnaire to assess phoria-related symptoms in children from 5 years of age. Overall, this study underscores the importance of comprehensive binocular vision assessments in school-aged children.</description>
	<pubDate>2026-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 36: An Exploratory Study of the Relationship Between Phoria, Oculomotor Skills and Visual Symptoms in Children Aged 5 to 8 Years</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/36">doi: 10.3390/jemr19020036</a></p>
	<p>Authors:
		Carmen Bilbao
		Julia Cavero
		Jorge Ares
		Alba Carrera
		Diana Gargallo
		</p>
	<p>Purpose: To investigate the relationship between oculomotor skills, phorias, and visual symptoms in a pediatric population aged 5 to 8 years. Methods: A cross-sectional study was conducted with 120 children, divided into three age groups. Each participant underwent a full optometric examination, including the Maddox test for dissociated phoria, and the Northeastern State University College of Optometry (NSUCO) and Developmental Eye Movement (DEM) tests for oculomotor function. In addition, the Convergence Insufficiency Symptom Survey (CISS V-15) questionnaire was administered to assess visual symptoms. Results: The prevalence of binocular and oculomotor dysfunctions varied by age and sex. Differences in saccadic and pursuit eye movement performance were observed between groups. Older children showed patterns of association between phoria measurements, oculomotor performance, and possible visual symptoms, particularly in girls over 6 years of age. Conclusions: This study provides additional descriptive data for the pediatric population and highlights that oculomotor dysfunction and phoria frequently coexist. Symptom scores measured by the CISS V-15 tended to increase with age. The results should be considered preliminary and potentially hypothesis-generating, pending the future availability of a validated questionnaire to assess phoria-related symptoms in children from 5 years of age. Overall, this study underscores the importance of comprehensive binocular vision assessments in school-aged children.</p>
	]]></content:encoded>

	<dc:title>An Exploratory Study of the Relationship Between Phoria, Oculomotor Skills and Visual Symptoms in Children Aged 5 to 8 Years</dc:title>
			<dc:creator>Carmen Bilbao</dc:creator>
			<dc:creator>Julia Cavero</dc:creator>
			<dc:creator>Jorge Ares</dc:creator>
			<dc:creator>Alba Carrera</dc:creator>
			<dc:creator>Diana Gargallo</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020036</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-02</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/jemr19020036</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/36</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
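	<!--
	The DEM test mentioned above is conventionally scored as a ratio of horizontal to vertical naming time, with the horizontal time first adjusted for omission and addition errors. A sketch of that commonly published scoring rule, stated here as an assumption about this study's procedure; the timing values are hypothetical.

	def dem_ratio(h_time_s, v_time_s, omissions=0, additions=0):
	    """Adjusted horizontal time = H * 80 / (80 - omissions + additions);
	    DEM ratio = adjusted horizontal time / vertical time."""
	    adjusted_h = h_time_s * 80.0 / (80 - omissions + additions)
	    return adjusted_h / v_time_s

	print(round(dem_ratio(h_time_s=52.0, v_time_s=40.0, omissions=2), 2))  # 1.33
	-->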
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/35">

	<title>JEMR, Vol. 19, Pages 35: Aging Reduces the Efficiency of Parafoveal Lexical Activation During Chinese Sentence Reading</title>
	<link>https://www.mdpi.com/1995-8692/19/2/35</link>
	<description>This study utilized the gaze-contingent boundary paradigm to examine age-related changes in parafoveal processing during Chinese sentence reading. A community sample of 65 older readers and 68 younger readers from Hong Kong read 130 sentences while their eye movements were recorded. In each sentence, an invisible boundary was placed just before a critical target word. Before the readers' eye gaze crossed the boundary, a parafoveal preview was presented in the position of the target word. The preview could be identical, orthographically related, phonologically related, semantically related, or unrelated to the first character of the target word. Once the eye gaze passed the boundary, the preview characters changed to the target. For the younger readers, the related parafoveal previews facilitated the subsequent foveal processing of the target compared to the unrelated previews across early and late eye movement measures. In contrast, the older readers demonstrated a reduced identical preview benefit in early eye movement measures. They also showed benefits in other preview conditions only in later measures. These results suggest that older Chinese readers can extract linguistic information from parafoveal vision despite reduced visual acuity. However, the efficiency of parafoveal processing is reduced, potentially due to slower processing speed and less efficient spreading activation within the lexical network.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 35: Aging Reduces the Efficiency of Parafoveal Lexical Activation During Chinese Sentence Reading</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/35">doi: 10.3390/jemr19020035</a></p>
	<p>Authors:
		Yiu-Kei Tsang
		Ming Yan
		Jinger Pan
		</p>
	<p>This study utilized the gaze-contingent boundary paradigm to examine age-related changes in parafoveal processing during Chinese sentence reading. A community sample of 65 older readers and 68 younger readers from Hong Kong read 130 sentences while their eye movements were recorded. In each sentence, an invisible boundary was placed just before a critical target word. Before the readers' eye gaze crossed the boundary, a parafoveal preview was presented in the position of the target word. The preview could be identical, orthographically related, phonologically related, semantically related, or unrelated to the first character of the target word. Once the eye gaze passed the boundary, the preview characters changed to the target. For the younger readers, the related parafoveal previews facilitated the subsequent foveal processing of the target compared to the unrelated previews across early and late eye movement measures. In contrast, the older readers demonstrated a reduced identical preview benefit in early eye movement measures. They also showed benefits in other preview conditions only in later measures. These results suggest that older Chinese readers can extract linguistic information from parafoveal vision despite reduced visual acuity. However, the efficiency of parafoveal processing is reduced, potentially due to slower processing speed and less efficient spreading activation within the lexical network.</p>
	]]></content:encoded>

	<dc:title>Aging Reduces the Efficiency of Parafoveal Lexical Activation During Chinese Sentence Reading</dc:title>
			<dc:creator>Yiu-Kei Tsang</dc:creator>
			<dc:creator>Ming Yan</dc:creator>
			<dc:creator>Jinger Pan</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020035</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/jemr19020035</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/35</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
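	<!--
	A minimal sketch of the gaze-contingent boundary logic described above: the preview is shown at the target position until gaze crosses an invisible boundary, then the display swaps to the target word. Real implementations hook into tracker and display callbacks; here gaze samples are a plain list of x positions, one per display refresh.

	def boundary_trial(gaze_xs, boundary_x, preview="preview", target="target"):
	    """Return the string shown at each refresh, swapping
	    preview -> target the first time gaze crosses the boundary."""
	    shown, out = preview, []
	    for x in gaze_xs:
	        if shown == preview and x >= boundary_x:
	            shown = target  # one-shot display change at the crossing
	        out.append(shown)
	    return out

	print(boundary_trial([300, 450, 610, 700], boundary_x=600))
	# ['preview', 'preview', 'target', 'target']
	-->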
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/34">

	<title>JEMR, Vol. 19, Pages 34: The Impact of Emotion Perception and Gaze Sharing on Collaborative Experience and Performance in Multiplayer Games</title>
	<link>https://www.mdpi.com/1995-8692/19/2/34</link>
	<description>Compared to traditional offline collaboration, current online collaboration often lacks nonverbal social cues, resulting in lower efficiency and a reduced emotional connection between teammates. To address this issue, this study used a two-player collaborative puzzle game as the experimental setting to explore the impact of two nonverbal social cues, emotion and gaze, on collaborative experience and performance. Specifically, this study designed four collaborative modes: with and without teammates' facial expressions, and with and without teammates' gaze points. Sixty-two participants took part in the experiment, and each pair was required to complete all four modes. Subsequently, we analyzed their collaborative experience through subjective questionnaires, objective facial expressions, and gaze overlap rates. The experimental results revealed that teammates' gaze could effectively enhance collaborative efficiency, while facial expression is key to optimizing subjective experience. Combining both cues yielded further advantages in cognitive and emotional dimensions, leading to improved performance outcomes. The study also indicated that facial expressions could alleviate the social pressure triggered by shared gaze from teammates. Additionally, the study examined how personality differences influenced collaborative experiences and performance. The results indicated that individuals with high agreeableness actively seek social cues, leading to more positive collaborative experiences. This study provides empirical evidence for understanding the interactive mechanisms of cognitive and emotional processes during online collaboration, and points the way toward designing adaptive, personalized intelligent collaborative systems.</description>
	<pubDate>2026-03-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 34: The Impact of Emotion Perception and Gaze Sharing on Collaborative Experience and Performance in Multiplayer Games</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/34">doi: 10.3390/jemr19020034</a></p>
	<p>Authors:
		Lu Yin
		He Zhang
		Renke He
		</p>
	<p>Compared to traditional offline collaboration, current online collaboration often lacks nonverbal social cues, resulting in lower efficiency and a reduced emotional connection between teammates. To address this issue, this study used a two-player collaborative puzzle game as the experimental setting to explore the impact of two nonverbal social cues, emotion and gaze, on collaborative experience and performance. Specifically, this study designed four collaborative modes: with and without teammates&rsquo; facial expressions, and with and without teammates&rsquo; gaze points. Sixty-two participants took part in the experiment, and each pair was required to complete all four modes. Subsequently, we analyzed their collaborative experience through subjective questionnaires, objective facial expressions, and gaze overlap rates. The experimental results revealed that teammates&rsquo; gaze could effectively enhance collaborative efficiency, while facial expression is key to optimizing subjective experience. Combining both cues yielded further advantages in the cognitive and emotional dimensions, leading to improved performance outcomes. The study also indicated that facial expressions could alleviate the social pressure triggered by shared gaze from teammates. Additionally, the study examined how personality differences influenced collaborative experiences and performance. The results indicated that individuals with high agreeableness actively seek social cues, leading to more positive collaborative experiences. This study provides empirical evidence for understanding the interactive mechanisms of cognitive and emotional processes during online collaboration, and points the way toward designing adaptive, personalized intelligent collaborative systems.</p>
	]]></content:encoded>

	<dc:title>The Impact of Emotion Perception and Gaze Sharing on Collaborative Experience and Performance in Multiplayer Games</dc:title>
			<dc:creator>Lu Yin</dc:creator>
			<dc:creator>He Zhang</dc:creator>
			<dc:creator>Renke He</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020034</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-25</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-25</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/jemr19020034</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/34</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/33">

	<title>JEMR, Vol. 19, Pages 33: A Data-Driven Approach for Comparing Gaze Allocation Across Conditions</title>
	<link>https://www.mdpi.com/1995-8692/19/2/33</link>
	<description>Gaze analysis often relies on hypothesised, subjectively defined regions of interest (ROIs) or heatmaps: ROIs enable condition comparisons but reduce objectivity and exploration; while heatmaps avoid this, they require many pixel-wise comparisons, making differences hard to detect. Here, we propose an advanced data-driven approach for analysing gaze behaviour. We use DNNs (adapted versions of AlexNet) to classify conditions from gaze patterns, paired with reverse correlation to show where and how gaze differs between conditions. We test our approach on data from an experiment investigating the effects of object-specific sounds (e.g., church bell ringing) on gaze allocation. ROI-based analysis shows a significant difference between conditions (congruent sound, no sound, phase-scrambled sound and pink noise), with more gaze allocation on sound-associated objects in the congruent sound condition. However, as expected, significance depends on the definition of the ROIs. Heatmaps show some unclear qualitative differences, but none are significant after correcting for pixel-wise comparisons. We show that, for some scenes, the DNNs could classify the condition based on individual fixations with accuracy significantly higher than chance. Our approach shows that sound can alter gaze allocation, revealing task-specific, non-trivial strategies: fixations are not always drawn to the sound source but shift away from salient features, sometimes falling between salient features and the sound source. Crucially, such fixation strategies could not be revealed using a traditional hypothesis-driven approach. Overall, the method is objective, data-driven, and enables clear comparisons of conditions.</description>
	<pubDate>2026-03-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 33: A Data-Driven Approach for Comparing Gaze Allocation Across Conditions</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/33">doi: 10.3390/jemr19020033</a></p>
	<p>Authors:
		Jack Prosser
		Anna Metzger
		Matteo Toscani
		</p>
	<p>Gaze analysis often relies on hypothesised, subjectively defined regions of interest (ROIs) or heatmaps: ROIs enable condition comparisons but reduce objectivity and exploration; while heatmaps avoid this, they require many pixel-wise comparisons, making differences hard to detect. Here, we propose an advanced data-driven approach for analysing gaze behaviour. We use DNNs (adapted versions of AlexNet) to classify conditions from gaze patterns, paired with reverse correlation to show where and how gaze differs between conditions. We test our approach on data from an experiment investigating the effects of object-specific sounds (e.g., church bell ringing) on gaze allocation. ROI-based analysis shows a significant difference between conditions (congruent sound, no sound, phase-scrambled sound and pink noise), with more gaze allocation on sound-associated objects in the congruent sound condition. However, as expected, significance depends on the definition of the ROIs. Heatmaps show some unclear qualitative differences, but none are significant after correcting for pixel-wise comparisons. We show that, for some scenes, the DNNs could classify the condition based on individual fixations with accuracy significantly higher than chance. Our approach shows that sound can alter gaze allocation, revealing task-specific, non-trivial strategies: fixations are not always drawn to the sound source but shift away from salient features, sometimes falling between salient features and the sound source. Crucially, such fixation strategies could not be revealed using a traditional hypothesis-driven approach. Overall, the method is objective, data-driven, and enables clear comparisons of conditions.</p>
	]]></content:encoded>

	<dc:title>A Data-Driven Approach for Comparing Gaze Allocation Across Conditions</dc:title>
			<dc:creator>Jack Prosser</dc:creator>
			<dc:creator>Anna Metzger</dc:creator>
			<dc:creator>Matteo Toscani</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020033</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-18</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-18</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/jemr19020033</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/33</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/32">

	<title>JEMR, Vol. 19, Pages 32: Visual Attention in Real Classrooms: A Study with Eye-Tracking in Urban and Rural Schools of Chile</title>
	<link>https://www.mdpi.com/1995-8692/19/2/32</link>
	<description>Student gaze behavior has been scarcely studied in real Latin American primary school classrooms. The objective of this study is to analyze the relationship between primary students&amp;rsquo; eye behavior and cognitive development in urban and rural contexts. A quantitative method was employed, including 126 primary school students aged 6 to 8 years old, from urban and rural schools in Chile. Raven&amp;rsquo;s Colored Progressive Matrices (CPM) measured cognitive development, and students&amp;rsquo; eye behavior was recorded during a real class using eye-tracking glasses. Eye behavior was analyzed in six areas of interest: (1) own material, (2) teacher, (3) teacher&amp;rsquo;s material, (4) peer, (5) peer&amp;rsquo;s material, and (6) non-interactional gaze. The results indicate that the CPM scale demonstrates adequate reliability (&amp;alpha; = 0.89). In addition, no significant differences by sex were found, nor any relationship between eye behavior and cognitive development; however, significant differences were found by environment (urban versus rural). The regression analysis is significant (F(7, 102) = 6.173, p &amp;lt; 0.001) and suggests that gazing at the teacher&amp;rsquo;s material and one&amp;rsquo;s own material are negative predictors of non-interactional gaze, or students&amp;rsquo; disconnection from the class. In conclusion, distraction in the classroom is influenced by learning-related contextual variables rather than sex or cognitive development.</description>
	<pubDate>2026-03-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 32: Visual Attention in Real Classrooms: A Study with Eye-Tracking in Urban and Rural Schools of Chile</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/32">doi: 10.3390/jemr19020032</a></p>
	<p>Authors:
		Marco Villalta-Paucar
		Jéssica Verónica Rebolledo-Etchepare
		</p>
	<p>Student gaze behavior has been scarcely studied in real Latin American primary school classrooms. The objective of this study is to analyze the relationship between primary students&rsquo; eye behavior and cognitive development in urban and rural contexts. A quantitative method was employed, including 126 primary school students aged 6 to 8 years old, from urban and rural schools in Chile. Raven&rsquo;s Colored Progressive Matrices (CPM) measured cognitive development, and students&rsquo; eye behavior was recorded during a real class using eye-tracking glasses. Eye behavior was analyzed in six areas of interest: (1) own material, (2) teacher, (3) teacher&rsquo;s material, (4) peer, (5) peer&rsquo;s material, and (6) non-interactional gaze. The results indicate that the CPM scale demonstrates adequate reliability (&alpha; = 0.89). In addition, no significant differences by sex were found, nor any relationship between eye behavior and cognitive development; however, significant differences were found by environment (urban versus rural). The regression analysis is significant (F(7, 102) = 6.173, p &lt; 0.001) and suggests that gazing at the teacher&rsquo;s material and one&rsquo;s own material are negative predictors of non-interactional gaze, or students&rsquo; disconnection from the class. In conclusion, distraction in the classroom is influenced by learning-related contextual variables rather than sex or cognitive development.</p>
	]]></content:encoded>

	<dc:title>Visual Attention in Real Classrooms: A Study with Eye-Tracking in Urban and Rural Schools of Chile</dc:title>
			<dc:creator>Marco Villalta-Paucar</dc:creator>
			<dc:creator>Jéssica Verónica Rebolledo-Etchepare</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020032</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-18</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-18</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/jemr19020032</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/32</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/31">

	<title>JEMR, Vol. 19, Pages 31: Towards Rigorous Eye-Tracking Methodology in Interdisciplinary Fields: Insights from and Recommendations for Tourism Research</title>
	<link>https://www.mdpi.com/1995-8692/19/2/31</link>
	<description>Eye-tracking methodology represents a young but rapidly growing approach in tourism research, offering a direct window into the cognitive processes driving tourism stakeholders&amp;rsquo; behaviour. However, a critical gap remains between the rapid adoption of this tool and the methodological rigour required to interpret its neurophysiological data. This critical review synthesizes 23 empirical studies (2020&amp;ndash;2025) from the destination marketing and branding domain to diagnose eye-tracking&amp;rsquo;s state-of-the-art application. Adopting the SALSA framework (Search, Appraisal, Synthesis, Analysis) augmented by PRISMA 2020 guidelines, this study systematically searched the Web of Science and Scopus databases. Studies were appraised using an eight-dimensional quality rubric covering dimensions from theoretical grounding to experimental design and statistical rigour. Findings revealed a &amp;ldquo;tool-first&amp;rdquo; exploratory phenomenon, in which the majority of studies relied on basic fixation metrics to infer complex psychological states such as &amp;ldquo;interest&amp;rdquo;, even though such metrics could equally reflect other cognitive states. Furthermore, most reviewed studies failed to control for stimulus-level confounds (e.g., luminance, AOI size) and used inappropriate data-handling procedures, such as omitting data cleaning and treating count and binary data as continuous. These problems, coupled with transparency deficits, undermined the validity of their conclusions. Hence, a Checklist for Eye-Tracking Rigour (CETR) and a methodological decision tree were developed to guide researchers towards confirmatory and neurobiologically grounded research. The findings also provide a framework for managers and practitioners to interpret eye-tracking studies more accurately.</description>
	<pubDate>2026-03-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 31: Towards Rigorous Eye-Tracking Methodology in Interdisciplinary Fields: Insights from and Recommendations for Tourism Research</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/31">doi: 10.3390/jemr19020031</a></p>
	<p>Authors:
		Wilson Cheong Hin Hong
		</p>
	<p>Eye-tracking methodology represents a young but rapidly growing approach in tourism research, offering a direct window into the cognitive processes driving tourism stakeholders&rsquo; behaviour. However, a critical gap remains between the rapid adoption of this tool and the methodological rigour required to interpret its neurophysiological data. This critical review synthesizes 23 empirical studies (2020&ndash;2025) from the destination marketing and branding domain to diagnose eye-tracking&rsquo;s state-of-the-art application. Adopting the SALSA framework (Search, Appraisal, Synthesis, Analysis) augmented by PRISMA 2020 guidelines, this study systematically searched the Web of Science and Scopus databases. Studies were appraised using an eight-dimensional quality rubric covering dimensions from theoretical grounding to experimental design and statistical rigour. Findings revealed a &ldquo;tool-first&rdquo; exploratory phenomenon, in which the majority of studies relied on basic fixation metrics to infer complex psychological states such as &ldquo;interest&rdquo;, even though such metrics could equally reflect other cognitive states. Furthermore, most reviewed studies failed to control for stimulus-level confounds (e.g., luminance, AOI size) and used inappropriate data-handling procedures, such as omitting data cleaning and treating count and binary data as continuous. These problems, coupled with transparency deficits, undermined the validity of their conclusions. Hence, a Checklist for Eye-Tracking Rigour (CETR) and a methodological decision tree were developed to guide researchers towards confirmatory and neurobiologically grounded research. The findings also provide a framework for managers and practitioners to interpret eye-tracking studies more accurately.</p>
	]]></content:encoded>

	<dc:title>Towards Rigorous Eye-Tracking Methodology in Interdisciplinary Fields: Insights from and Recommendations for Tourism Research</dc:title>
			<dc:creator>Wilson Cheong Hin Hong</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020031</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-12</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/jemr19020031</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/31</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/30">

	<title>JEMR, Vol. 19, Pages 30: Seeing Through Packaging: Eye-Tracking Evidence on How Product Visual Strategy and Unit Size Shape Visual Attention and Consumer Evaluation</title>
	<link>https://www.mdpi.com/1995-8692/19/2/30</link>
	<description>Product visual strategies (PVS) on food packaging influence how consumers visually inspect products at the point of purchase. However, evidence comparing transparent windows and product images remains mixed, particularly regarding how these strategies interact with food unit size (FUS) and shape visual attention patterns. Moreover, few studies have examined these effects using objective eye-tracking measures within controlled experimental designs. This study employed a 2 &amp;times; 2 between-subjects quasi-experiment to investigate the effects of PVS (transparent window and product image) and FUS (large unit and small unit) on visual attention and subsequent product-related evaluations. A total of 160 participants viewed realistic chocolate package stimuli that varied only in visual strategy and unit size. Eye movements were recorded using Tobii Pro Glasses 2. Visual attention was assessed through Time to First Fixation (TFF) and Fixation Duration (FD), while expected tastiness, expected quality, and purchase intention were measured using standardized self-report scales. The results showed that transparent-window packaging attracted visual attention more rapidly and sustained longer fixations than product-image packaging. These attention differences were accompanied by higher expected tastiness, expected quality, and purchase intention. While food unit size alone showed limited effects on eye-movement measures, a significant interaction was observed: small-unit designs elicited greater visual attention and more favorable evaluations only when the product was directly visible through a transparent window. Overall, the findings demonstrate how product visual strategies and food unit size jointly shape visual attention allocation during packaging inspection. By integrating eye-tracking measures with evaluation and behavioral intention outcomes, this study contributes to applied eye-movement research in food packaging contexts.</description>
	<pubDate>2026-03-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 30: Seeing Through Packaging: Eye-Tracking Evidence on How Product Visual Strategy and Unit Size Shape Visual Attention and Consumer Evaluation</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/30">doi: 10.3390/jemr19020030</a></p>
	<p>Authors:
		Zhiyi Guo
		Zihao Cao
		Yongchun Mao
		Muhizam Mustafa
		Yuqi Luo
		Yueyue Ning
		</p>
	<p>Product visual strategies (PVS) on food packaging influence how consumers visually inspect products at the point of purchase. However, evidence comparing transparent windows and product images remains mixed, particularly regarding how these strategies interact with food unit size (FUS) and shape visual attention patterns. Moreover, few studies have examined these effects using objective eye-tracking measures within controlled experimental designs. This study employed a 2 &times; 2 between-subjects quasi-experiment to investigate the effects of PVS (transparent window and product image) and FUS (large unit and small unit) on visual attention and subsequent product-related evaluations. A total of 160 participants viewed realistic chocolate package stimuli that varied only in visual strategy and unit size. Eye movements were recorded using Tobii Pro Glasses 2. Visual attention was assessed through Time to First Fixation (TFF) and Fixation Duration (FD), while expected tastiness, expected quality, and purchase intention were measured using standardized self-report scales. The results showed that transparent-window packaging attracted visual attention more rapidly and sustained longer fixations than product-image packaging. These attention differences were accompanied by higher expected tastiness, expected quality, and purchase intention. While food unit size alone showed limited effects on eye-movement measures, a significant interaction was observed: small-unit designs elicited greater visual attention and more favorable evaluations only when the product was directly visible through a transparent window. Overall, the findings demonstrate how product visual strategies and food unit size jointly shape visual attention allocation during packaging inspection. By integrating eye-tracking measures with evaluation and behavioral intention outcomes, this study contributes to applied eye-movement research in food packaging contexts.</p>
	]]></content:encoded>

	<dc:title>Seeing Through Packaging: Eye-Tracking Evidence on How Product Visual Strategy and Unit Size Shape Visual Attention and Consumer Evaluation</dc:title>
			<dc:creator>Zhiyi Guo</dc:creator>
			<dc:creator>Zihao Cao</dc:creator>
			<dc:creator>Yongchun Mao</dc:creator>
			<dc:creator>Muhizam Mustafa</dc:creator>
			<dc:creator>Yuqi Luo</dc:creator>
			<dc:creator>Yueyue Ning</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020030</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-10</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-10</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/jemr19020030</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/30</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/29">

	<title>JEMR, Vol. 19, Pages 29: Simultaneous Analysis of Microsaccades and Pupil Size Variations in Age-Related Cognitive Impairment Using Eye-Tracking Technology</title>
	<link>https://www.mdpi.com/1995-8692/19/2/29</link>
	<description>Age-related cognitive impairment represents a critical stage in the continuum of neurodegenerative disorders, including Alzheimer&amp;rsquo;s disease (AD), highlighting the need for objective and non-invasive physiological indicators of early neurological change. This study investigates the simultaneous analysis of microsaccadic eye movements and pupil size variations as ocular biomarkers associated with age-related cognitive impairment using eye-tracking technology. A total of 70 participants were recruited and categorized into three age groups: individuals in their 20s, 60s, and 70s. Participants in their 70s were further categorized based on MMSE-K scores into cognitively normal (&amp;ge;24) and impaired (&amp;le;23) subgroups. Quantitative analyses showed a significant age-related increase in microsaccade frequency along both axes, with significantly higher microsaccade frequencies (p &amp;lt; 0.01) among individuals with lower cognitive scores within the same age group. Pupil size variation, including constriction and dilation rates, declined with age, while response speed remained relatively unchanged across all age groups. These findings highlight a clear association between age-related cognitive decline and involuntary ocular responses. The proposed dual-biomarker method offers a non-invasive and quantitative framework that may complement traditional cognitive screening tools. Future studies involving larger cohorts and clinically diagnosed AD populations are required to determine the diagnostic utility of these ocular biomarkers.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 29: Simultaneous Analysis of Microsaccades and Pupil Size Variations in Age-Related Cognitive Impairment Using Eye-Tracking Technology</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/29">doi: 10.3390/jemr19020029</a></p>
	<p>Authors:
		Seokjun Oh
		Tahsin Nairuz
		Sung-Jun Park
		Jong-Ha Lee
		</p>
	<p>Age-related cognitive impairment represents a critical stage in the continuum of neurodegenerative disorders, including Alzheimer&rsquo;s disease (AD), highlighting the need for objective and non-invasive physiological indicators of early neurological change. This study investigates the simultaneous analysis of microsaccadic eye movements and pupil size variations as ocular biomarkers associated with age-related cognitive impairment using eye-tracking technology. A total of 70 participants were recruited and categorized into three age groups: individuals in their 20s, 60s, and 70s. Participants in their 70s were further categorized based on MMSE-K scores into cognitively normal (&ge;24) and impaired (&le;23) subgroups. Quantitative analyses showed a significant age-related increase in microsaccade frequency along both axes, with significantly higher microsaccade frequencies (p &lt; 0.01) among individuals with lower cognitive scores within the same age group. Pupil size variation, including constriction and dilation rates, declined with age, while response speed remained relatively unchanged across all age groups. These findings highlight a clear association between age-related cognitive decline and involuntary ocular responses. The proposed dual-biomarker method offers a non-invasive and quantitative framework that may complement traditional cognitive screening tools. Future studies involving larger cohorts and clinically diagnosed AD populations are required to determine the diagnostic utility of these ocular biomarkers.</p>
	]]></content:encoded>

	<dc:title>Simultaneous Analysis of Microsaccades and Pupil Size Variations in Age-Related Cognitive Impairment Using Eye-Tracking Technology</dc:title>
			<dc:creator>Seokjun Oh</dc:creator>
			<dc:creator>Tahsin Nairuz</dc:creator>
			<dc:creator>Sung-Jun Park</dc:creator>
			<dc:creator>Jong-Ha Lee</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020029</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/jemr19020029</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/29</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/28">

	<title>JEMR, Vol. 19, Pages 28: Comparing Eye-Tracking Metrics with the Driver Activity Load Index</title>
	<link>https://www.mdpi.com/1995-8692/19/2/28</link>
	<description>This study investigated how perceptual workload in driving situations is captured by subjective ratings versus eye-tracking metrics. Fifty participants completed low- and high-complexity conditions while fixation behavior, blinks, and pupil diameter were recorded, and workload was assessed using the DALI scale. High-load scenes elicited longer fixations, fewer fixations per minute, reduced blinking, and increased pupil dilation, indicating elevated attentional demand. DALI scores increased with scene complexity and were negatively associated with fixation duration, suggesting that participants&amp;rsquo; subjective ratings were driven primarily by perceptual strain rather than cognitive effort. Eye-tracking patterns supported this interpretation: fixation-based indicators tended to reflect the cognitive component of demand, whereas DALI selectively tracked perceptual overload. Together, these results show that DALI is highly sensitive to visual density, and that eye-movement measures provide converging evidence for its specificity as a perceptual load instrument.</description>
	<pubDate>2026-03-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 28: Comparing Eye-Tracking Metrics with the Driver Activity Load Index</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/28">doi: 10.3390/jemr19020028</a></p>
	<p>Authors:
		Julia Bend
		Markus Gödker
		Elise Sophie Banach
		Thomas Franke
		</p>
	<p>This study investigated how perceptual workload in driving situations is captured by subjective ratings versus eye-tracking metrics. Fifty participants completed low- and high-complexity conditions while fixation behavior, blinks, and pupil diameter were recorded, and workload was assessed using the DALI scale. High-load scenes elicited longer fixations, fewer fixations per minute, reduced blinking, and increased pupil dilation, indicating elevated attentional demand. DALI scores increased with scene complexity and were negatively associated with fixation duration, suggesting that participants&rsquo; subjective ratings were driven primarily by perceptual strain rather than cognitive effort. Eye-tracking patterns supported this interpretation: fixation-based indicators tended to reflect the cognitive component of demand, whereas DALI selectively tracked perceptual overload. Together, these results show that DALI is highly sensitive to visual density, and that eye-movement measures provide converging evidence for its specificity as a perceptual load instrument.</p>
	]]></content:encoded>

	<dc:title>Comparing Eye-Tracking Metrics with the Driver Activity Load Index</dc:title>
			<dc:creator>Julia Bend</dc:creator>
			<dc:creator>Markus Gödker</dc:creator>
			<dc:creator>Elise Sophie Banach</dc:creator>
			<dc:creator>Thomas Franke</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020028</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/jemr19020028</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/28</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/27">

	<title>JEMR, Vol. 19, Pages 27: Calendar Horizon as a Boundary Affordance: An Attempt-Centric Eye-Tracking Analysis of Calendar Scheduling Interfaces</title>
	<link>https://www.mdpi.com/1995-8692/19/2/27</link>
	<description>Digital calendars are interactive representations of time that shape both scheduling outcomes and the micro-process of searching, verifying, and revising candidate placements. We examine calendar horizon&amp;mdash;whether weekend time is visible in the default week view&amp;mdash;as a boundary affordance in scheduling interfaces. Using eye tracking and interaction logs, we model each scheduling episode as a sequence of placement attempts and align gaze to each attempt, partitioning it into Early/Mid/Late phases and summarizing attention across structural AOIs (task panel, calendar grid, and the weekend column when present). Two experiments used drag-and-drop and dropdown slot-picking; weekend visibility was manipulated within the dropdown interface, while evening slots remained available. Across 105 participants (1018 task episodes), AttemptsCount ranged from 1 to 7. AttemptsCount predicted gaze-based process cost: each additional attempt corresponded to ~56% more total fixation duration. Personal tasks required more attempts than work tasks and elicited stronger Late-phase weekend verification when the weekend was visible. Horizon cues also shifted boundary outcomes: hiding the weekend reduced weekend placements and increased reliance on evening scheduling, indicating displacement into adjacent time regions. These findings position calendar horizon as a design lever that shapes both process (verification) and outcomes (boundary placements), with implications for calendar UIs and mixed-initiative scheduling tools.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 27: Calendar Horizon as a Boundary Affordance: An Attempt-Centric Eye-Tracking Analysis of Calendar Scheduling Interfaces</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/27">doi: 10.3390/jemr19020027</a></p>
	<p>Authors:
		Nina Xie
		Yuanyuan Wang
		Yujun Liu
		</p>
	<p>Digital calendars are interactive representations of time that shape both scheduling outcomes and the micro-process of searching, verifying, and revising candidate placements. We examine calendar horizon&mdash;whether weekend time is visible in the default week view&mdash;as a boundary affordance in scheduling interfaces. Using eye tracking and interaction logs, we model each scheduling episode as a sequence of placement attempts and align gaze to each attempt, partitioning it into Early/Mid/Late phases and summarizing attention across structural AOIs (task panel, calendar grid, and the weekend column when present). Two experiments used drag-and-drop and dropdown slot-picking; weekend visibility was manipulated within the dropdown interface, while evening slots remained available. Across 105 participants (1018 task episodes), AttemptsCount ranged from 1 to 7. AttemptsCount predicted gaze-based process cost: each additional attempt corresponded to ~56% more total fixation duration. Personal tasks required more attempts than work tasks and elicited stronger Late-phase weekend verification when the weekend was visible. Horizon cues also shifted boundary outcomes: hiding the weekend reduced weekend placements and increased reliance on evening scheduling, indicating displacement into adjacent time regions. These findings position calendar horizon as a design lever that shapes both process (verification) and outcomes (boundary placements), with implications for calendar UIs and mixed-initiative scheduling tools.</p>
	]]></content:encoded>

	<dc:title>Calendar Horizon as a Boundary Affordance: An Attempt-Centric Eye-Tracking Analysis of Calendar Scheduling Interfaces</dc:title>
			<dc:creator>Nina Xie</dc:creator>
			<dc:creator>Yuanyuan Wang</dc:creator>
			<dc:creator>Yujun Liu</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020027</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/jemr19020027</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/27</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/26">

	<title>JEMR, Vol. 19, Pages 26: Integrating Multi-Task Eye Tracking and Interpretable Machine Learning for High-Accuracy Screening of Amblyopia in Pediatric Populations</title>
	<link>https://www.mdpi.com/1995-8692/19/2/26</link>
	<description>Amblyopia is a developmental disorder of spatial vision in which abnormal visual experience leads to persistent reductions in acuity and contrast sensitivity, even after optimal optical correction. We introduce a brief, child-friendly battery of task-evoked eye tracking that probes fixation stability, fine pattern processing, and smooth pursuit control across three simple paradigms. Oculomotor traces are transformed into physiologically interpretable markers&amp;mdash;fixation dispersion and saccadic strategy, orientation-dependent drift and stability, pursuit gain, and tracking error&amp;mdash;and used to train a compact classifier with subject-wise validation and probability calibration. In a cohort of school-aged participants with clinically diagnosed unilateral amblyopia and age-matched visually normal controls tested under best-corrected viewing conditions, the approach consistently separated groups with stable performance across folds; feature-importance analyses indicated that pursuit- and orientation-dependent markers contributed most. The protocol runs in minutes, is objective and noninvasive, and is well tolerated in pediatric settings. By quantifying functional consequences of amblyopic vision that complement conventional acuity testing, this work positions task-evoked eye movements as practical biomarkers for screening and monitoring, and lays the groundwork for prospective validation and age-stratified norms in community and school-based vision care.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 26: Integrating Multi-Task Eye Tracking and Interpretable Machine Learning for High-Accuracy Screening of Amblyopia in Pediatric Populations</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/26">doi: 10.3390/jemr19020026</a></p>
	<p>Authors:
		Xiumei Song
		Yunhan Zhang
		Hongyu Chen
		Chenyu Tang
		Bohan Yao
		Hubin Zhao
		Luigi G. Occhipinti
		Arokia Nathan
		Changbin Zhai
		Shuo Gao
		</p>
	<p>Amblyopia is a developmental disorder of spatial vision in which abnormal visual experience leads to persistent reductions in acuity and contrast sensitivity, even after optimal optical correction. We introduce a brief, child-friendly battery of task-evoked eye tracking that probes fixation stability, fine pattern processing, and smooth pursuit control across three simple paradigms. Oculomotor traces are transformed into physiologically interpretable markers&mdash;fixation dispersion and saccadic strategy, orientation-dependent drift and stability, pursuit gain, and tracking error&mdash;and used to train a compact classifier with subject-wise validation and probability calibration. In a cohort of school-aged participants with clinically diagnosed unilateral amblyopia and age-matched visually normal controls tested under best-corrected viewing conditions, the approach consistently separated groups with stable performance across folds; feature-importance analyses indicated that pursuit- and orientation-dependent markers contributed most. The protocol runs in minutes, is objective and noninvasive, and is well tolerated in pediatric settings. By quantifying functional consequences of amblyopic vision that complement conventional acuity testing, this work positions task-evoked eye movements as practical biomarkers for screening and monitoring, and lays the groundwork for prospective validation and age-stratified norms in community and school-based vision care.</p>
	]]></content:encoded>

	<dc:title>Integrating Multi-Task Eye Tracking and Interpretable Machine Learning for High-Accuracy Screening of Amblyopia in Pediatric Populations</dc:title>
			<dc:creator>Xiumei Song</dc:creator>
			<dc:creator>Yunhan Zhang</dc:creator>
			<dc:creator>Hongyu Chen</dc:creator>
			<dc:creator>Chenyu Tang</dc:creator>
			<dc:creator>Bohan Yao</dc:creator>
			<dc:creator>Hubin Zhao</dc:creator>
			<dc:creator>Luigi G. Occhipinti</dc:creator>
			<dc:creator>Arokia Nathan</dc:creator>
			<dc:creator>Changbin Zhai</dc:creator>
			<dc:creator>Shuo Gao</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020026</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/jemr19020026</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/26</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/25">

	<title>JEMR, Vol. 19, Pages 25: Characteristics of Eye Movements and Correlation to Cognitive Functions in Relation to the Location of Guide Signs and Driving Speed</title>
	<link>https://www.mdpi.com/1995-8692/19/2/25</link>
	<description>Driving safety critically depends on the ability of drivers to efficiently recognize and process guide sign information under varying traffic conditions. This study examined how driving speed (slow/fast) and guide sign location (front/left) influence eye-movement behavior during guide sign recognition, and how these effects relate to drivers’ cognitive functions and basic demographics. Twenty-four licensed drivers performed a guide sign recognition task using onboard video stimuli, and eye movements based on fixations and saccades were recorded. Generalized linear mixed models with participants as random effects were used to analyze the interactions between driving conditions, cognitive functions, demographics, and eye movement measures. Under low-load conditions, such as slow driving and front-positioned signs, individual differences in cognitive functions, including verbal memory and useful field of view, were strongly reflected in eye-movement behavior. Under high-load conditions characterized by fast driving and left-positioned signs, the influence of cognitive function was reduced, and eye movements were more strongly associated with driving experience. Increasing driving speed was associated with fewer eye movements, whereas the saccade amplitude remained unchanged, indicating the suppression of exploratory eye movements. For left-positioned signs, the fixation duration on the target was maintained, whereas gaze shifts between the forward environment and the sign were reduced.</description>
	<pubDate>2026-03-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 25: Characteristics of Eye Movements and Correlation to Cognitive Functions in Relation to the Location of Guide Signs and Driving Speed</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/25">doi: 10.3390/jemr19020025</a></p>
	<p>Authors:
		Takaya Maeyama
		Hiroki Okada
		Daisuke Sawamura
		</p>
	<p>Driving safety critically depends on the ability of drivers to efficiently recognize and process guide sign information under varying traffic conditions. This study examined how driving speed (slow/fast) and guide sign location (front/left) influence eye-movement behavior during guide sign recognition, and how these effects relate to drivers’ cognitive functions and basic demographics. Twenty-four licensed drivers performed a guide sign recognition task using onboard video stimuli, and eye movements based on fixations and saccades were recorded. Generalized linear mixed models with participants as random effects were used to analyze the interactions between driving conditions, cognitive functions, demographics, and eye movement measures. Under low-load conditions, such as slow driving and front-positioned signs, individual differences in cognitive functions, including verbal memory and useful field of view, were strongly reflected in eye-movement behavior. Under high-load conditions characterized by fast driving and left-positioned signs, the influence of cognitive function was reduced, and eye movements were more strongly associated with driving experience. Increasing driving speed was associated with fewer eye movements, whereas the saccade amplitude remained unchanged, indicating the suppression of exploratory eye movements. For left-positioned signs, the fixation duration on the target was maintained, whereas gaze shifts between the forward environment and the sign were reduced.</p>
	]]></content:encoded>

	<dc:title>Characteristics of Eye Movements and Correlation to Cognitive Functions in Relation to the Location of Guide Signs and Driving Speed</dc:title>
			<dc:creator>Takaya Maeyama</dc:creator>
			<dc:creator>Hiroki Okada</dc:creator>
			<dc:creator>Daisuke Sawamura</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020025</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-03-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-03-02</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/jemr19020025</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/25</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/2/24">

	<title>JEMR, Vol. 19, Pages 24: A Feasibility Study of Tablet-Based Eye Movement Assessment Using a Built-In Camera: A Pilot Study</title>
	<link>https://www.mdpi.com/1995-8692/19/2/24</link>
	<description>This study developed a tablet PC&amp;ndash;based eye movement assessment application and conducted a pilot investigation to explore whether tablet-based ocular motor metrics demonstrate functional sensitivity to variations in conventional visual function parameters. Twenty-three healthy adults (10 males, 13 females; mean age: 24.41 &amp;plusmn; 1.91 years) without a history of ocular disease performed smooth pursuit and saccadic eye movement tests at three difficulty levels. For exploratory analysis, participants were stratified into above- and below-mean groups based on conventional visual function test results. For smooth pursuit movements, mean pursuit traversal time demonstrated statistically significant differences between the low&amp;ndash;medium (1.11 s) and low&amp;ndash;high (1.14 s) difficulty levels (p &amp;lt; 0.05), with corresponding differences in derived velocity. Saccadic movements showed significant mean accuracy differences between the low&amp;ndash;high (1.02 points) and medium&amp;ndash;high (0.95 points) difficulty levels (p &amp;lt; 0.05). Participants with higher-than-average horizontal phoria values (distance and near) and blur/break points of near convergence amplitude exhibited significantly longer smooth pursuit traversal times (corresponding to slower derived velocities) (p &amp;lt; 0.05). The high-value group for the blur point of near convergence amplitude demonstrated significantly superior saccadic accuracy (1.63 points) compared with the low-value group (1.30 points) (p &amp;lt; 0.05). Exploratory associations between visual function parameters and ocular motor performance were observed within the healthy participant group, linking tablet-based smooth pursuit and saccadic eye movement performance to conventional visual function measures. These findings suggest that tablet PC&amp;ndash;based eye movement assessment may serve as a feasible, low-cost approach for exploratory screening and functional monitoring, rather than as a validated diagnostic tool.</description>
	<pubDate>2026-02-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 24: A Feasibility Study of Tablet-Based Eye Movement Assessment Using a Built-In Camera: A Pilot Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/2/24">doi: 10.3390/jemr19020024</a></p>
	<p>Authors:
		Kyunghyun Park
		Unseok Lee
		Sejoon Moon
		Hyungsik Bae
		Hyungoo Kang
		</p>
	<p>This study developed a tablet PC&ndash;based eye movement assessment application and conducted a pilot investigation to explore whether tablet-based ocular motor metrics demonstrate functional sensitivity to variations in conventional visual function parameters. Twenty-three healthy adults (10 males, 13 females; mean age: 24.41 &plusmn; 1.91 years) without a history of ocular disease performed smooth pursuit and saccadic eye movement tests at three difficulty levels. For exploratory analysis, participants were stratified into above- and below-mean groups based on conventional visual function test results. For smooth pursuit movements, mean pursuit traversal time demonstrated statistically significant differences between the low&ndash;medium (1.11 s) and low&ndash;high (1.14 s) difficulty levels (p &lt; 0.05), with corresponding differences in derived velocity. Saccadic movements showed significant mean accuracy differences between the low&ndash;high (1.02 points) and medium&ndash;high (0.95 points) difficulty levels (p &lt; 0.05). Participants with higher-than-average horizontal phoria values (distance and near) and blur/break points of near convergence amplitude exhibited significantly longer smooth pursuit traversal times (corresponding to slower derived velocities) (p &lt; 0.05). The high-value group for the blur point of near convergence amplitude demonstrated significantly superior saccadic accuracy (1.63 points) compared with the low-value group (1.30 points) (p &lt; 0.05). Exploratory associations between visual function parameters and ocular motor performance were observed within the healthy participant group, linking tablet-based smooth pursuit and saccadic eye movement performance to conventional visual function measures. These findings suggest that tablet PC&ndash;based eye movement assessment may serve as a feasible, low-cost approach for exploratory screening and functional monitoring, rather than as a validated diagnostic tool.</p>
	]]></content:encoded>

	<dc:title>A Feasibility Study of Tablet-Based Eye Movement Assessment Using a Built-In Camera: A Pilot Study</dc:title>
			<dc:creator>Kyunghyun Park</dc:creator>
			<dc:creator>Unseok Lee</dc:creator>
			<dc:creator>Sejoon Moon</dc:creator>
			<dc:creator>Hyungsik Bae</dc:creator>
			<dc:creator>Hyungoo Kang</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19020024</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-24</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-24</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/jemr19020024</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/2/24</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/23">

	<title>JEMR, Vol. 19, Pages 23: Mapping Eye-Tracking Research in Human&amp;ndash;Computer Interaction: A Science-Mapping and Content-Analysis Study</title>
	<link>https://www.mdpi.com/1995-8692/19/1/23</link>
	<description>Eye tracking has become a central method in human&amp;ndash;computer interaction (HCI), supported by advances in sensing technologies and AI-based gaze analysis. Despite this rapid growth, a comprehensive and up-to-date overview of eye-tracking research across the broader HCI landscape remains lacking. This study combines records from Web of Science (WoS) and Scopus to analyse 1033 publications on eye tracking in HCI published between 2020 and 2025. After merging and deduplicating the datasets, we conducted bibliometric network analyses (keyword co-occurrence, co-citation, co-authorship, and source mapping) using VOSviewer and performed a qualitative content analysis of the 50 most-cited papers. The literature is dominated by journal articles and conference papers produced by small- to medium-sized research teams (mean: 3.9 authors per paper; h-index: 29). Keyword and overlay visualisations reveal four principal research axes: deep-learning-based gaze estimation; XR-related interaction paradigms within HCI; cognitive load and human factors; and usability- and accessibility-oriented interface design. The most-cited studies focus on gaze interaction in immersive environments, deep learning for gaze estimation, multimodal interaction, and physiological approaches to assessing cognitive load. Overall, the findings indicate that eye tracking in HCI is evolving from a measurement-oriented technique into a core enabling technology that supports interaction design, cognitive assessment, accessibility, and ethical considerations such as privacy. This review identifies research gaps and outlines future directions for benchmarking practices, real-world deployments, and privacy-preserving gaze analytics in HCI.</description>
	<pubDate>2026-02-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 23: Mapping Eye-Tracking Research in Human&ndash;Computer Interaction: A Science-Mapping and Content-Analysis Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/23">doi: 10.3390/jemr19010023</a></p>
	<p>Authors:
		Adem Korkmaz
		</p>
	<p>Eye tracking has become a central method in human&ndash;computer interaction (HCI), supported by advances in sensing technologies and AI-based gaze analysis. Despite this rapid growth, a comprehensive and up-to-date overview of eye-tracking research across the broader HCI landscape remains lacking. This study combines records from Web of Science (WoS) and Scopus to analyse 1033 publications on eye tracking in HCI published between 2020 and 2025. After merging and deduplicating the datasets, we conducted bibliometric network analyses (keyword co-occurrence, co-citation, co-authorship, and source mapping) using VOSviewer and performed a qualitative content analysis of the 50 most-cited papers. The literature is dominated by journal articles and conference papers produced by small- to medium-sized research teams (mean: 3.9 authors per paper; h-index: 29). Keyword and overlay visualisations reveal four principal research axes: deep-learning-based gaze estimation; XR-related interaction paradigms within HCI; cognitive load and human factors; and usability- and accessibility-oriented interface design. The most-cited studies focus on gaze interaction in immersive environments, deep learning for gaze estimation, multimodal interaction, and physiological approaches to assessing cognitive load. Overall, the findings indicate that eye tracking in HCI is evolving from a measurement-oriented technique into a core enabling technology that supports interaction design, cognitive assessment, accessibility, and ethical considerations such as privacy. This review identifies research gaps and outlines future directions for benchmarking practices, real-world deployments, and privacy-preserving gaze analytics in HCI.</p>
	]]></content:encoded>

	<dc:title>Mapping Eye-Tracking Research in Human&amp;ndash;Computer Interaction: A Science-Mapping and Content-Analysis Study</dc:title>
			<dc:creator>Adem Korkmaz</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010023</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-12</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/jemr19010023</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/23</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/22">

	<title>JEMR, Vol. 19, Pages 22: Influence of Multimodal AR-HUD Navigation Prompt Design on Driving Behavior at F-Type-5 M Intersections</title>
	<link>https://www.mdpi.com/1995-8692/19/1/22</link>
	<description>In complex urban traffic environments, the design of multimodal prompts in augmented reality head-up displays (AR-HUDs) plays a critical role in driving safety and operational efficiency. Despite growing interest in audiovisual navigation assistance, empirical evidence remains limited regarding when prompts should be delivered and whether visual and auditory information should remain temporally aligned. To address this gap, this study aims to examine how audiovisual prompt timing and prompt mode influence driving behavior in AR-HUD navigation systems at complex F-type-5 m intersections through a within-subject experimental design. A 2 (prompt mode: synchronized vs. asynchronous) × 3 (prompt timing: −1000 m, −600 m, −400 m) design was employed to assess driver response time, situational awareness, and eye-movement measures, including average fixation duration and fixation count. The results showed clear main effects of both prompt mode and prompt timing. Compared with asynchronous prompts, synchronized prompts consistently resulted in shorter response times, reduced visual demand, and higher situational awareness. Driving performance also improved as prompt timing shifted closer to the intersection, from −1000 m to −400 m. However, no significant interaction effects were found, suggesting that prompt mode and prompt timing can be treated as relatively independent design factors. In addition, among the six experimental conditions, the −400 m synchronized condition yielded the most favorable overall performance, whereas the −1000 m asynchronous condition performed worst. These findings indicate that in time-critical and low-tolerance scenarios, such as F-type-5 m intersections, near-distance synchronized multimodal prompts should be prioritized. This study provides empirical support for optimizing prompt timing and cross-modal temporal alignment in AR-HUD systems and offers actionable implications for interface and timing design.</description>
	<pubDate>2026-02-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 22: Influence of Multimodal AR-HUD Navigation Prompt Design on Driving Behavior at F-Type-5 M Intersections</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/22">doi: 10.3390/jemr19010022</a></p>
	<p>Authors:
		Ziqi Liu
		Zhengxing Yang
		Yifan Du
		</p>
	<p>In complex urban traffic environments, the design of multimodal prompts in augmented reality head-up displays (AR-HUDs) plays a critical role in driving safety and operational efficiency. Despite growing interest in audiovisual navigation assistance, empirical evidence remains limited regarding when prompts should be delivered and whether visual and auditory information should remain temporally aligned. To address this gap, this study aims to examine how audiovisual prompt timing and prompt mode influence driving behavior in AR-HUD navigation systems at complex F-type-5 m intersections through a within-subject experimental design. A 2 (prompt mode: synchronized vs. asynchronous) × 3 (prompt timing: −1000 m, −600 m, −400 m) design was employed to assess driver response time, situational awareness, and eye-movement measures, including average fixation duration and fixation count. The results showed clear main effects of both prompt mode and prompt timing. Compared with asynchronous prompts, synchronized prompts consistently resulted in shorter response times, reduced visual demand, and higher situational awareness. Driving performance also improved as prompt timing shifted closer to the intersection, from −1000 m to −400 m. However, no significant interaction effects were found, suggesting that prompt mode and prompt timing can be treated as relatively independent design factors. In addition, among the six experimental conditions, the −400 m synchronized condition yielded the most favorable overall performance, whereas the −1000 m asynchronous condition performed worst. These findings indicate that in time-critical and low-tolerance scenarios, such as F-type-5 m intersections, near-distance synchronized multimodal prompts should be prioritized. This study provides empirical support for optimizing prompt timing and cross-modal temporal alignment in AR-HUD systems and offers actionable implications for interface and timing design.</p>
	]]></content:encoded>

	<dc:title>Influence of Multimodal AR-HUD Navigation Prompt Design on Driving Behavior at F-Type-5 M Intersections</dc:title>
			<dc:creator>Ziqi Liu</dc:creator>
			<dc:creator>Zhengxing Yang</dc:creator>
			<dc:creator>Yifan Du</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010022</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-11</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-11</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/jemr19010022</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/22</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/21">

	<title>JEMR, Vol. 19, Pages 21: Influence of Stimulus Layout and Social Presence on Deception-Related Eye Movements and Blinks in the Concealed Information Test</title>
	<link>https://www.mdpi.com/1995-8692/19/1/21</link>
	<description>Over the past decades, eye movements and blinks have been integrated into Concealed Information Test (CIT) paradigms as indicators of deception. Recent findings suggested that fixation patterns in CITs depend on stimulus layout, particularly the distinction between sequential and simultaneous stimulus presentation. In addition, the impact of social presence on deceptive eye movements, critical for applying the CIT in real-world social settings, remains insufficiently examined. The present study addresses these issues through two experiments. In both, participants selected a card and had to reveal, conceal, or fake its value while all possible cards were displayed in pairs. Experiment 1 examined whether deceptive intentions could be differentiated using fixations and blinks, and extended previous findings on the effect of stimulus layout. Experiment 2 assessed the stability of deception-related eye movements and blinks across three levels of social presence (no observation, observation via video, and observation by a physically present person). Our findings replicate effects previously observed with the simultaneous presentation of a larger number of cards, demonstrating how stimulus layout modulates deception-related eye movement patterns in CITs. The levels of social presence realised in this study did not significantly alter these patterns, indicating that deception-related eye movements and blinks in CITs remain stable under passive social presence.</description>
	<pubDate>2026-02-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 21: Influence of Stimulus Layout and Social Presence on Deception-Related Eye Movements and Blinks in the Concealed Information Test</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/21">doi: 10.3390/jemr19010021</a></p>
	<p>Authors:
		Valentin Foucher
		Anke Huckauf
		</p>
	<p>Over the past decades, eye movements and blinks have been integrated into Concealed Information Test (CIT) paradigms as indicators of deception. Recent findings suggested that fixation patterns in CITs depend on stimulus layout, particularly the distinction between sequential and simultaneous stimulus presentation. In addition, the impact of social presence on deceptive eye movements, critical for applying the CIT in real-world social settings, remains insufficiently examined. The present study addresses these issues through two experiments. In both, participants selected a card and had to reveal, conceal, or fake its value while all possible cards were displayed in pairs. Experiment 1 examined whether deceptive intentions could be differentiated using fixations and blinks, and extended previous findings on the effect of stimulus layout. Experiment 2 assessed the stability of deception-related eye movements and blinks across three levels of social presence (no observation, observation via video, and observation by a physically present person). Our findings replicate effects previously observed with the simultaneous presentation of a larger number of cards, demonstrating how stimulus layout modulates deception-related eye movement patterns in CITs. The levels of social presence realised in this study did not significantly alter these patterns, indicating that deception-related eye movements and blinks in CITs remain stable under passive social presence.</p>
	]]></content:encoded>

	<dc:title>Influence of Stimulus Layout and Social Presence on Deception-Related Eye Movements and Blinks in the Concealed Information Test</dc:title>
			<dc:creator>Valentin Foucher</dc:creator>
			<dc:creator>Anke Huckauf</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010021</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-11</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-11</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/jemr19010021</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/21</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/20">

	<title>JEMR, Vol. 19, Pages 20: An Open-Source Horizontal Strabismus Simulator as an Evaluation Platform for Monocular Gaze Estimation Using Deep Learning Models</title>
	<link>https://www.mdpi.com/1995-8692/19/1/20</link>
	<description>Strabismus affects 2&amp;ndash;4% of the global population, with horizontal cases accounting for more than 90%. Automated screening using monocular gaze estimation technology shows promise for early detection. However, existing models assume normal binocular vision, and their applicability to strabismus remains unvalidated due to the lack of evaluation platforms capable of reproducing disconjugate eye movements with known ground-truth angles. To address this gap, we developed an open-source, low-cost (approximately 200 USD) horizontal strabismus simulator. The simulator features two independently controllable artificial eyeballs mounted on a two-axis gimbal mechanism with servo motors and gyro sensors for real-time angle measurement. Mechanical accuracy achieved a mean absolute error of less than 0.1&amp;deg; across all axes, well below the clinical detection threshold of 1 prism diopter (&amp;asymp;0.57&amp;deg;). An evaluation of three representative AI models (Single Eye, GazeNet, and EyeNet) revealed estimation errors of 6.44&amp;ndash;8.75&amp;deg;, substantially exceeding the clinical target of 2.8&amp;deg;. At this error level, small-angle strabismus (&amp;lt;15 prism diopters) would likely be missed, underscoring the need for strabismus-specific model development. Moreover, rapid accuracy degradation was observed beyond &amp;plusmn;15&amp;deg; gaze angles. This platform establishes baseline performance metrics and provides a foundation for advancing gaze estimation technology for strabismus screening.</description>
	<pubDate>2026-02-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 20: An Open-Source Horizontal Strabismus Simulator as an Evaluation Platform for Monocular Gaze Estimation Using Deep Learning Models</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/20">doi: 10.3390/jemr19010020</a></p>
	<p>Authors:
		Shumpei Takinami
		Yuka Morita
		Jun Seita
		Tetsuro Oshika
		</p>
	<p>Strabismus affects 2&ndash;4% of the global population, with horizontal cases accounting for more than 90%. Automated screening using monocular gaze estimation technology shows promise for early detection. However, existing models assume normal binocular vision, and their applicability to strabismus remains unvalidated due to the lack of evaluation platforms capable of reproducing disconjugate eye movements with known ground-truth angles. To address this gap, we developed an open-source, low-cost (approximately 200 USD) horizontal strabismus simulator. The simulator features two independently controllable artificial eyeballs mounted on a two-axis gimbal mechanism with servo motors and gyro sensors for real-time angle measurement. Mechanical accuracy achieved a mean absolute error of less than 0.1&deg; across all axes, well below the clinical detection threshold of 1 prism diopter (&asymp;0.57&deg;). An evaluation of three representative AI models (Single Eye, GazeNet, and EyeNet) revealed estimation errors of 6.44&ndash;8.75&deg;, substantially exceeding the clinical target of 2.8&deg;. At this error level, small-angle strabismus (&lt;15 prism diopters) would likely be missed, underscoring the need for strabismus-specific model development. Moreover, rapid accuracy degradation was observed beyond &plusmn;15&deg; gaze angles. This platform establishes baseline performance metrics and provides a foundation for advancing gaze estimation technology for strabismus screening.</p>
	]]></content:encoded>

	<dc:title>An Open-Source Horizontal Strabismus Simulator as an Evaluation Platform for Monocular Gaze Estimation Using Deep Learning Models</dc:title>
			<dc:creator>Shumpei Takinami</dc:creator>
			<dc:creator>Yuka Morita</dc:creator>
			<dc:creator>Jun Seita</dc:creator>
			<dc:creator>Tetsuro Oshika</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010020</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-09</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-09</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/jemr19010020</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/20</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/19">

	<title>JEMR, Vol. 19, Pages 19: The Impact of 3D Interactive Prompts on College Students&amp;rsquo; Learning Outcomes in Desktop Virtual Learning Environments: A Study Based on Eye-Tracking Experiments</title>
	<link>https://www.mdpi.com/1995-8692/19/1/19</link>
	<description>Despite the increasing adoption of desktop virtual reality (VR) in higher education, the specific instructional efficacy of 3D interactive prompts remains inadequately understood. This study examines how such prompts&amp;mdash;specifically dynamic spatial annotations and 3D animated demonstrations&amp;mdash;influence learning outcomes within a desktop virtual learning environment (DVLE). In a quasi-experimental design integrated with eye-tracking and multimodal learning analytics, university students were assigned to either an experimental group (DVLE with 3D prompts) or a control group (basic DVLE) while completing physics tasks. Data collection encompassed eye-tracking metrics (fixation heatmaps, pupil diameter, and dwell time), post-test performance (assessing knowledge comprehension and spatial problem-solving), and cognitive load ratings. Results indicated that the experimental group achieved significantly superior learning outcomes, particularly in spatial understanding and dynamic reasoning, alongside optimized visual attention patterns&amp;mdash;characterized by shorter initial fixation latency and prolonged fixation on key 3D elements&amp;mdash;and reduced cognitive load. Eye-tracking metrics were positively correlated with post-test scores, confirming that 3D prompts enhance learning by improving spatial attention guidance. These findings demonstrate that embedding 3D interactive prompts in DVLEs effectively directs visual attention, alleviates cognitive burden, and improves learning efficiency, offering valuable implications for the design of immersive educational settings.</description>
	<pubDate>2026-02-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 19: The Impact of 3D Interactive Prompts on College Students&rsquo; Learning Outcomes in Desktop Virtual Learning Environments: A Study Based on Eye-Tracking Experiments</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/19">doi: 10.3390/jemr19010019</a></p>
	<p>Authors:
		Xinyi Wu
		Xiangen Wu
		Weixing Hu
		Jian Sun
		</p>
	<p>Despite the increasing adoption of desktop virtual reality (VR) in higher education, the specific instructional efficacy of 3D interactive prompts remains inadequately understood. This study examines how such prompts&mdash;specifically dynamic spatial annotations and 3D animated demonstrations&mdash;influence learning outcomes within a desktop virtual learning environment (DVLE). In a quasi-experimental design integrated with eye-tracking and multimodal learning analytics, university students were assigned to either an experimental group (DVLE with 3D prompts) or a control group (basic DVLE) while completing physics tasks. Data collection encompassed eye-tracking metrics (fixation heatmaps, pupil diameter, and dwell time), post-test performance (assessing knowledge comprehension and spatial problem-solving), and cognitive load ratings. Results indicated that the experimental group achieved significantly superior learning outcomes, particularly in spatial understanding and dynamic reasoning, alongside optimized visual attention patterns&mdash;characterized by shorter initial fixation latency and prolonged fixation on key 3D elements&mdash;and reduced cognitive load. Eye-tracking metrics were positively correlated with post-test scores, confirming that 3D prompts enhance learning by improving spatial attention guidance. These findings demonstrate that embedding 3D interactive prompts in DVLEs effectively directs visual attention, alleviates cognitive burden, and improves learning efficiency, offering valuable implications for the design of immersive educational settings.</p>
	]]></content:encoded>

	<dc:title>The Impact of 3D Interactive Prompts on College Students&amp;rsquo; Learning Outcomes in Desktop Virtual Learning Environments: A Study Based on Eye-Tracking Experiments</dc:title>
			<dc:creator>Xinyi Wu</dc:creator>
			<dc:creator>Xiangen Wu</dc:creator>
			<dc:creator>Weixing Hu</dc:creator>
			<dc:creator>Jian Sun</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010019</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/jemr19010019</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/19</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/18">

	<title>JEMR, Vol. 19, Pages 18: The Influence of Noise Perception and Parent-Rated Developmental Characteristics on White Noise Benefits in Children</title>
	<link>https://www.mdpi.com/1995-8692/19/1/18</link>
	<description>White noise has been proposed to enhance cognitive performance in children with ADHD, but findings are inconsistent, and benefits vary across tasks and individuals. Such variability suggests that diagnostic comparisons may overlook meaningful developmental differences. This exploratory study examined whether developmental characteristics and subjective evaluations of auditory and visual white noise predicted performance changes in two eye-movement tasks: Prolonged Fixation (PF) and Memory-Guided Saccades (MGS). Children with varying degrees of ADHD symptoms completed both tasks under noise and no-noise conditions, and noise benefit scores were calculated as the performance difference between conditions. Overall, white-noise effects were small and dependent on noise modality and task. In the PF task, greater parent-rated perceptual difficulties and higher visual noise discomfort were associated with improved performance under noise. In the MGS task, poorer motor skills predicted visual noise benefit, whereas greater visual noise discomfort predicted reduced noise benefit. These findings suggest that beneficial effects of white noise are influenced by developmental characteristics and subjective perception in task-dependent ways. The results highlight the need for individualized, transdiagnostic approaches in future noise research and challenge the notion of white noise as categorically beneficial for ADHD.</description>
	<pubDate>2026-02-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 18: The Influence of Noise Perception and Parent-Rated Developmental Characteristics on White Noise Benefits in Children</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/18">doi: 10.3390/jemr19010018</a></p>
	<p>Authors:
		Erica Jostrup
		Marcus Nyström
		Göran Söderlund
		Emma Claesdotter-Knutsson
		Peik Gustafsson
		Pia Tallberg
		</p>
	<p>White noise has been proposed to enhance cognitive performance in children with ADHD, but findings are inconsistent, and benefits vary across tasks and individuals. Such variability suggests that diagnostic comparisons may overlook meaningful developmental differences. This exploratory study examined whether developmental characteristics and subjective evaluations of auditory and visual white noise predicted performance changes in two eye-movement tasks: Prolonged Fixation (PF) and Memory-Guided Saccades (MGS). Children with varying degrees of ADHD symptoms completed both tasks under noise and no-noise conditions, and noise benefit scores were calculated as the performance difference between conditions. Overall, white-noise effects were small and dependent on noise modality and task. In the PF task, greater parent-rated perceptual difficulties and higher visual noise discomfort were associated with improved performance under noise. In the MGS task, poorer motor skills predicted visual noise benefit, whereas greater visual noise discomfort predicted reduced noise benefit. These findings suggest that beneficial effects of white noise are influenced by developmental characteristics and subjective perception in task-dependent ways. The results highlight the need for individualized, transdiagnostic approaches in future noise research and challenge the notion of white noise as categorically beneficial for ADHD.</p>
	]]></content:encoded>

	<dc:title>The Influence of Noise Perception and Parent-Rated Developmental Characteristics on White Noise Benefits in Children</dc:title>
			<dc:creator>Erica Jostrup</dc:creator>
			<dc:creator>Marcus Nyström</dc:creator>
			<dc:creator>Göran Söderlund</dc:creator>
			<dc:creator>Emma Claesdotter-Knutsson</dc:creator>
			<dc:creator>Peik Gustafsson</dc:creator>
			<dc:creator>Pia Tallberg</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010018</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-05</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/jemr19010018</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/18</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/17">

	<title>JEMR, Vol. 19, Pages 17: Eye Movement Classification Using Neuromorphic Vision Sensors</title>
	<link>https://www.mdpi.com/1995-8692/19/1/17</link>
	<description>Eye movement classification, particularly the identification of fixations and saccades, plays a vital role in advancing our understanding of neurological functions and cognitive processing. Conventional data modalities, such as RGB webcams, often face limitations including motion blur, latency, and susceptibility to noise. Neuromorphic Vision Sensors, also known as event cameras (ECs), capture pixel-level changes asynchronously and at a high temporal resolution, making them well suited for detecting the swift transitions inherent to eye movements. However, the resulting data are sparse, which makes them less well suited for use with conventional algorithms. Spiking Neural Networks (SNNs) are gaining attention due to their discrete spatio-temporal spike mechanism, which is ideally suited for sparse data. These networks offer a biologically inspired computational paradigm capable of modeling the temporal dynamics captured by event cameras. This study validates the use of SNNs with event cameras for efficient eye movement classification. We manually annotated the EV-Eye dataset, the largest publicly available event-based eye-tracking benchmark, into sequences of saccades and fixations, and we propose a convolutional SNN architecture operating directly on spike streams. Our model achieves an accuracy of 94% and a precision of 0.92 across annotated data from 10 users. As the first work to apply SNNs to eye movement classification using event data, we benchmark our approach against spiking baselines such as SpikingVGG and SpikingDenseNet, and additionally provide a detailed computational complexity comparison between SNN and ANN counterparts. Our results highlight the efficiency and robustness of SNNs for event-based vision tasks, showing more than an order-of-magnitude improvement in computational efficiency, with implications for fast and low-power neurocognitive diagnostic systems.</description>
	<pubDate>2026-02-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 17: Eye Movement Classification Using Neuromorphic Vision Sensors</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/17">doi: 10.3390/jemr19010017</a></p>
	<p>Authors:
		Khadija Iddrisu
		Waseem Shariff
		Maciej Stec
		Noel O’Connor
		Suzanne Little
		</p>
	<p>Eye movement classification, particularly the identification of fixations and saccades, plays a vital role in advancing our understanding of neurological functions and cognitive processing. Conventional data modalities, such as RGB webcams, often face limitations including motion blur, latency, and susceptibility to noise. Neuromorphic Vision Sensors, also known as event cameras (ECs), capture pixel-level changes asynchronously and at a high temporal resolution, making them well suited for detecting the swift transitions inherent to eye movements. However, the resulting data are sparse, which makes them less well suited for use with conventional algorithms. Spiking Neural Networks (SNNs) are gaining attention due to their discrete spatio-temporal spike mechanism, which is ideally suited for sparse data. These networks offer a biologically inspired computational paradigm capable of modeling the temporal dynamics captured by event cameras. This study validates the use of SNNs with event cameras for efficient eye movement classification. We manually annotated the EV-Eye dataset, the largest publicly available event-based eye-tracking benchmark, into sequences of saccades and fixations, and we propose a convolutional SNN architecture operating directly on spike streams. Our model achieves an accuracy of 94% and a precision of 0.92 across annotated data from 10 users. As the first work to apply SNNs to eye movement classification using event data, we benchmark our approach against spiking baselines such as SpikingVGG and SpikingDenseNet, and additionally provide a detailed computational complexity comparison between SNN and ANN counterparts. Our results highlight the efficiency and robustness of SNNs for event-based vision tasks, showing more than an order-of-magnitude improvement in computational efficiency, with implications for fast and low-power neurocognitive diagnostic systems.</p>
	]]></content:encoded>

	<dc:title>Eye Movement Classification Using Neuromorphic Vision Sensors</dc:title>
			<dc:creator>Khadija Iddrisu</dc:creator>
			<dc:creator>Waseem Shariff</dc:creator>
			<dc:creator>Maciej Stec</dc:creator>
			<dc:creator>Noel O’Connor</dc:creator>
			<dc:creator>Suzanne Little</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010017</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-04</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/jemr19010017</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/17</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/16">

	<title>JEMR, Vol. 19, Pages 16: Rapid Automatized Naming (RAN) and Word Reading Fluency in Early School-Aged Children: A Pilot Eye-Tracking Study</title>
	<link>https://www.mdpi.com/1995-8692/19/1/16</link>
	<description>Fluent word reading is a key literacy skill, yet the full extent of the oculomotor underpinnings in developing readers remains unknown. Rapid automatized naming (RAN) is a useful clinical measure that has been shown to predict word reading fluency. Here we use RAN scores to predict early, mid, and late local stages of word reading as measured by eye tracking in children who are at a critical time in their literacy development. Thirty-three children participated in two RAN tasks (rapid letter naming (RLN) and rapid digit naming (RDN)) and an eye-tracking task, which included sentence-level reading with an embedded target word. The eye-tracking measures of first fixation duration, regression path duration, and total word reading time were used as early, mid, and late local measures, respectively. RLN and RDN significantly predicted only the mid-stage of the reading process (regression path duration). Faster RLN and RDN times were associated with briefer regressions from target words. Preliminary results link behavioral RAN performance to a mid-stage oculomotor variable, indicating that children with slower RAN times may exhibit longer regressions during reading, suggesting possible difficulties with the integration of phonological processing skills.</description>
	<pubDate>2026-02-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 16: Rapid Automatized Naming (RAN) and Word Reading Fluency in Early School-Aged Children: A Pilot Eye-Tracking Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/16">doi: 10.3390/jemr19010016</a></p>
	<p>Authors:
		Alisa Baron
		Alexia Martins
		Gavino Puggioni
		Vanessa Harwood
		</p>
	<p>Fluent word reading is a key literacy skill, yet the full extent of the oculomotor underpinnings in developing readers remains unknown. Rapid automatized naming (RAN) is a useful clinical measure that has been shown to predict word reading fluency. Here we use RAN scores to predict early, mid, and late local stages of word reading as measured by eye tracking in children who are at a critical time in their literacy development. Thirty-three children participated in two RAN tasks (rapid letter naming (RLN) and rapid digit naming (RDN)) and an eye-tracking task, which included sentence-level reading with an embedded target word. The eye-tracking measures of first fixation duration, regression path duration, and total word reading time were used as early, mid, and late local measures, respectively. RLN and RDN significantly predicted only the mid-stage of the reading process (regression path duration). Faster RLN and RDN times were associated with briefer regressions from target words. Preliminary results link behavioral RAN performance to a mid-stage oculomotor variable, indicating that children with slower RAN times may exhibit longer regressions during reading, suggesting possible difficulties with the integration of phonological processing skills.</p>
	]]></content:encoded>

	<dc:title>Rapid Automatized Naming (RAN) and Word Reading Fluency in Early School-Aged Children: A Pilot Eye-Tracking Study</dc:title>
			<dc:creator>Alisa Baron</dc:creator>
			<dc:creator>Alexia Martins</dc:creator>
			<dc:creator>Gavino Puggioni</dc:creator>
			<dc:creator>Vanessa Harwood</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010016</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-04</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/jemr19010016</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/16</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/15">

	<title>JEMR, Vol. 19, Pages 15: Analysis of Saccade Characteristics During Fusional Vergence Tests in Normal Binocular Vision Participants</title>
	<link>https://www.mdpi.com/1995-8692/19/1/15</link>
	<description>The purpose of the study was to analyze, characterize, and compare the measurements of saccades that occurred during the positive and negative fusional vergence tests (PFV and NFV, respectively) as a function of the disparity vergence demand. Thirty-four participants&amp;rsquo; PFV and NFV amplitudes were measured in a haploscopic setup, recording eye movements with an EyeLink 1000 Plus (SR Research). The visual stimulus was a column of letters. Break and recovery points were determined objectively offline, and saccades were detected with a velocity-threshold-based method. A total of 13,103 and 14,381 saccades were detected during the measurement of the PFV and NFV ranges, respectively. Saccades followed the main sequence (&amp;rho; = 0.97, p &amp;lt; 0.001). The distributions of saccadic amplitudes during PFV and NFV differed significantly (U = 4.28, p &amp;lt; 0.001). The amplitude of saccades that occurred while fusion was maintained (median (IQR) 0.73 (0.92) deg) was significantly smaller than that of saccades during diplopia (2.10 (3.90) deg) (U = &amp;minus;75.63, p &amp;lt; 0.001). The distributions of saccade direction during the measurement of PFV and NFV amplitudes were statistically significantly different (p &amp;lt; 0.01). These findings contribute to a better understanding of how the visual system adjusts saccades in response to different disparity vergence demands during the evaluation of fusional vergence amplitudes.</description>
	<pubDate>2026-02-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 15: Analysis of Saccade Characteristics During Fusional Vergence Tests in Normal Binocular Vision Participants</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/15">doi: 10.3390/jemr19010015</a></p>
	<p>Authors:
		Cristina Rovira-Gay
		Clara Mestre
		Marc Argilés
		Jaume Pujol
		</p>
	<p>The purpose of the study was to analyze, characterize, and compare the measurements of saccades that occurred during the positive and negative fusional vergence tests (PFV and NFV, respectively) as a function of the disparity vergence demand. Thirty-four participants&rsquo; PFV and NFV amplitudes were measured in a haploscopic setup, recording eye movements with an EyeLink 1000 Plus (SR Research). The visual stimulus was a column of letters. Break and recovery points were determined objectively offline, and saccades were detected with a velocity-threshold-based method. A total of 13,103 and 14,381 saccades were detected during the measurement of the PFV and NFV ranges, respectively. Saccades followed the main sequence (&rho; = 0.97, p &lt; 0.001). The distributions of saccadic amplitudes during PFV and NFV differed significantly (U = 4.28, p &lt; 0.001). The amplitude of saccades that occurred while fusion was maintained (median (IQR) 0.73 (0.92) deg) was significantly smaller than that of saccades during diplopia (2.10 (3.90) deg) (U = &minus;75.63, p &lt; 0.001). The distributions of saccade direction during the measurement of PFV and NFV amplitudes were statistically significantly different (p &lt; 0.01). These findings contribute to a better understanding of how the visual system adjusts saccades in response to different disparity vergence demands during the evaluation of fusional vergence amplitudes.</p>
	]]></content:encoded>

	<dc:title>Analysis of Saccade Characteristics During Fusional Vergence Tests in Normal Binocular Vision Participants</dc:title>
			<dc:creator>Cristina Rovira-Gay</dc:creator>
			<dc:creator>Clara Mestre</dc:creator>
			<dc:creator>Marc Argilés</dc:creator>
			<dc:creator>Jaume Pujol</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010015</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-02-03</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-02-03</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/jemr19010015</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/15</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/14">

	<title>JEMR, Vol. 19, Pages 14: Visual Evaluation Strategies in Art Image Viewing: An Eye-Tracking Comparison of Art-Educated and Non-Art Participants</title>
	<link>https://www.mdpi.com/1995-8692/19/1/14</link>
	<description>Understanding how tacit knowledge embedded in visual materials is accessed and utilized during evaluation tasks remains a key challenge in human&amp;ndash;computer interaction and visual expertise research. Although eye-tracking studies have identified systematic differences between experts and novices, findings remain inconsistent, particularly in art-related visual evaluation contexts. This study examines whether tacit aspects of visual evaluation can be inferred from gaze behavior by comparing individuals with and without formal art education. Visual evaluation was assessed using a structured, prompt-based task in which participants inspected artistic images and responded to items targeting specific visual elements. Eye movements were recorded using a screen-based eye-tracking system. Areas of Interest (AOIs) corresponding to correct-answer regions were defined a priori based on expert judgment and item prompts. Both AOI-level metrics (e.g., fixation count, and mean and total visit and gaze durations) and image-level metrics (e.g., fixation count, saccade count, and pupil size) were analyzed using appropriate parametric and non-parametric statistical tests. The results showed that participants with an art-education background produced more fixations within AOIs, exhibited longer mean and total AOI visit and gaze durations, and demonstrated lower saccade counts than participants without art education. These patterns indicate more systematic and goal-directed gaze behavior during visual evaluation, suggesting that formal art education may shape tacit visual evaluation strategies. The findings also highlight the potential of eye tracking as a methodological tool for studying expertise-related differences in visual evaluation processes.</description>
	<pubDate>2026-01-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 14: Visual Evaluation Strategies in Art Image Viewing: An Eye-Tracking Comparison of Art-Educated and Non-Art Participants</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/14">doi: 10.3390/jemr19010014</a></p>
	<p>Authors:
		Adem Korkmaz
		Sevinc Gülsecen
		Grigor Mihaylov
		</p>
	<p>Understanding how tacit knowledge embedded in visual materials is accessed and utilized during evaluation tasks remains a key challenge in human&ndash;computer interaction and visual expertise research. Although eye-tracking studies have identified systematic differences between experts and novices, findings remain inconsistent, particularly in art-related visual evaluation contexts. This study examines whether tacit aspects of visual evaluation can be inferred from gaze behavior by comparing individuals with and without formal art education. Visual evaluation was assessed using a structured, prompt-based task in which participants inspected artistic images and responded to items targeting specific visual elements. Eye movements were recorded using a screen-based eye-tracking system. Areas of Interest (AOIs) corresponding to correct-answer regions were defined a priori based on expert judgment and item prompts. Both AOI-level metrics (e.g., fixation count, and mean and total visit and gaze durations) and image-level metrics (e.g., fixation count, saccade count, and pupil size) were analyzed using appropriate parametric and non-parametric statistical tests. The results showed that participants with an art-education background produced more fixations within AOIs, exhibited longer mean and total AOI visit and gaze durations, and demonstrated lower saccade counts than participants without art education. These patterns indicate more systematic and goal-directed gaze behavior during visual evaluation, suggesting that formal art education may shape tacit visual evaluation strategies. The findings also highlight the potential of eye tracking as a methodological tool for studying expertise-related differences in visual evaluation processes.</p>
	]]></content:encoded>

	<dc:title>Visual Evaluation Strategies in Art Image Viewing: An Eye-Tracking Comparison of Art-Educated and Non-Art Participants</dc:title>
			<dc:creator>Adem Korkmaz</dc:creator>
			<dc:creator>Sevinc Gülsecen</dc:creator>
			<dc:creator>Grigor Mihaylov</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010014</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-30</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-30</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/jemr19010014</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/14</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/13">

	<title>JEMR, Vol. 19, Pages 13: Including Eye Movement in the Assessment of Physical Fatigue Under Different Loading Types and Road Slopes</title>
	<link>https://www.mdpi.com/1995-8692/19/1/13</link>
	<description>Background: Emergency rescuers frequently carry heavy equipment for extended periods, making musculoskeletal disorders a major occupational concern. Loading type and road slope play important roles in inducing physical fatigue; however, the assessment of physical fatigue under these conditions remains limited. Aim: This study investigates physical fatigue under different loading types and road slope conditions using both electromyography (EMG) and eye movement metrics. In particular, this work focuses on eye movement metrics, a non-contact data source that remains largely unexplored for physical fatigue assessment, in comparison with EMG. Method: Prolonged load-bearing walking was simulated to replicate the physical demands experienced by emergency rescuers. Eighteen male participants completed experimental trials incorporating four loading types and three road slope conditions. Results: (1) Loading type and road slope significantly affected EMG activity, eye movement metrics, and perceptual responses. (2) Saccade time (ST), saccade speed (SS), and saccade amplitude (SA) exhibited significant differences in their rates of change across three stages defined by perceptual fatigue. ST, SS, and SA showed strong correlations with subjective fatigue throughout the entire load-bearing walking process, whereas pupil diameter demonstrated only a moderate correlation with subjective ratings. (3) Eye movement metrics were incorporated into multivariate quadratic regression models to quantify physical fatigue under different loading types and road slope conditions. Conclusions: These findings enhance the understanding of physical fatigue mechanisms by demonstrating the potential of eye movement metrics as non-invasive indicators for multidimensional fatigue monitoring in work environments involving varying loading types and road slopes.</description>
	<pubDate>2026-01-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 13: Including Eye Movement in the Assessment of Physical Fatigue Under Different Loading Types and Road Slopes</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/13">doi: 10.3390/jemr19010013</a></p>
	<p>Authors:
		Yixuan Wei
		Xueli Wen
		Shu Wang
		Lanyun Zhang
		Jianwu Chen
		Longzhe Jin
		</p>
	<p>Background: Emergency rescuers frequently carry heavy equipment for extended periods, making musculoskeletal disorders a major occupational concern. Loading type and road slope play important roles in inducing physical fatigue; however, the assessment of physical fatigue under these conditions remains limited. Aim: This study investigates physical fatigue under different loading types and road slope conditions using both electromyography (EMG) and eye movement metrics. In particular, this work focuses on eye movement metrics, a non-contact data source that remains largely unexplored for physical fatigue assessment, in comparison with EMG. Method: Prolonged load-bearing walking was simulated to replicate the physical demands experienced by emergency rescuers. Eighteen male participants completed experimental trials incorporating four loading types and three road slope conditions. Results: (1) Loading type and road slope significantly affected EMG activity, eye movement metrics, and perceptual responses. (2) Saccade time (ST), saccade speed (SS), and saccade amplitude (SA) exhibited significant differences in their rates of change across three stages defined by perceptual fatigue. ST, SS, and SA showed strong correlations with subjective fatigue throughout the entire load-bearing walking process, whereas pupil diameter demonstrated only a moderate correlation with subjective ratings. (3) Eye movement metrics were incorporated into multivariate quadratic regression models to quantify physical fatigue under different loading types and road slope conditions. Conclusions: These findings enhance the understanding of physical fatigue mechanisms by demonstrating the potential of eye movement metrics as non-invasive indicators for multidimensional fatigue monitoring in work environments involving varying loading types and road slopes.</p>
	]]></content:encoded>

	<dc:title>Including Eye Movement in the Assessment of Physical Fatigue Under Different Loading Types and Road Slopes</dc:title>
			<dc:creator>Yixuan Wei</dc:creator>
			<dc:creator>Xueli Wen</dc:creator>
			<dc:creator>Shu Wang</dc:creator>
			<dc:creator>Lanyun Zhang</dc:creator>
			<dc:creator>Jianwu Chen</dc:creator>
			<dc:creator>Longzhe Jin</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010013</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-27</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-27</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/jemr19010013</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/13</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/12">

	<title>JEMR, Vol. 19, Pages 12: A Comparison of Centroid Tracking and Image Phase for Improved Optokinetic Nystagmus Detection</title>
	<link>https://www.mdpi.com/1995-8692/19/1/12</link>
	<description>Optokinetic nystagmus (OKN) is an involuntary sawtooth eye movement that occurs in the presence of a drifting stimulus. Our experience is that low-amplitude/short-duration OKN can challenge the limits of our commercially available Pupil Neon eye-tracker, leading to false negative OKN detection results. We sought to investigate whether such instances could be remediated. We compared automated OKN detection using: (1) the gaze signal from the Pupil Neon (OKN-G), (2) centroid tracking (OKN-C), and (3) an image-phase-based &amp;ldquo;motion microscopy&amp;rdquo; technique (OKN-MMIC). The OKN-C and OKN-MMIC methods were also tested as a remediation step after a negative OKN-G result (OKN-C-STEP, OKN-MMIC-STEP). To validate the approaches, adults (n = 22) with normal visual acuity were tested whilst viewing trials of an OKN induction stimulus shown at four levels of visibility. Confusion matrices and performance measures were determined for a &amp;ldquo;main&amp;rdquo; dataset that included all methods, and a &amp;ldquo;retest&amp;rdquo; set, which contained instances where centroid tracking failed. For the main set, all tested methods improved upon OKN-G in terms of Matthews correlation coefficient (0.80&amp;ndash;0.85 vs. 0.76), sensitivity (0.89&amp;ndash;0.95 vs. 0.85), and accuracy (0.91&amp;ndash;0.93 vs. 0.88), but only OKN-C yielded better specificity (0.90&amp;ndash;0.96 vs. 0.95). For the retest set, the MMIC and MMIC-STEP methods consistently improved upon the performance of OKN-G across all measures.</description>
	<pubDate>2026-01-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 12: A Comparison of Centroid Tracking and Image Phase for Improved Optokinetic Nystagmus Detection</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/12">doi: 10.3390/jemr19010012</a></p>
	<p>Authors:
		Jason Turuwhenua
		Mohammad Norouzifard
		Zaw LinTun
		Misty Edmonds
		Rebecca Findlay
		Joanna Black
		Benjamin Thompson
		</p>
	<p>Optokinetic nystagmus (OKN) is an involuntary sawtooth eye movement that occurs in the presence of a drifting stimulus. Our experience is that low-amplitude/short-duration OKN can challenge the limits of our commercially available Pupil Neon eye-tracker, leading to false negative OKN detection results. We sought to investigate whether such instances could be remediated. We compared automated OKN detection using: (1) the gaze signal from the Pupil Neon (OKN-G), (2) centroid tracking (OKN-C), and (3) an image-phase-based &ldquo;motion microscopy&rdquo; technique (OKN-MMIC). The OKN-C and OKN-MMIC methods were also tested as a remediation step after a negative OKN-G result (OKN-C-STEP, OKN-MMIC-STEP). To validate the approaches, adults (n = 22) with normal visual acuity were tested whilst viewing trials of an OKN induction stimulus shown at four levels of visibility. Confusion matrices and performance measures were determined for a &ldquo;main&rdquo; dataset that included all methods, and a &ldquo;retest&rdquo; set, which contained instances where centroid tracking failed. For the main set, all tested methods improved upon OKN-G in terms of Matthews correlation coefficient (0.80&ndash;0.85 vs. 0.76), sensitivity (0.89&ndash;0.95 vs. 0.85), and accuracy (0.91&ndash;0.93 vs. 0.88), but only OKN-C yielded better specificity (0.90&ndash;0.96 vs. 0.95). For the retest set, the MMIC and MMIC-STEP methods consistently improved upon the performance of OKN-G across all measures.</p>
	]]></content:encoded>

	<dc:title>A Comparison of Centroid Tracking and Image Phase for Improved Optokinetic Nystagmus Detection</dc:title>
			<dc:creator>Jason Turuwhenua</dc:creator>
			<dc:creator>Mohammad Norouzifard</dc:creator>
			<dc:creator>Zaw LinTun</dc:creator>
			<dc:creator>Misty Edmonds</dc:creator>
			<dc:creator>Rebecca Findlay</dc:creator>
			<dc:creator>Joanna Black</dc:creator>
			<dc:creator>Benjamin Thompson</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010012</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-26</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-26</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/jemr19010012</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/12</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/11">

	<title>JEMR, Vol. 19, Pages 11: Inspecting the Retina: Oculomotor Patterns and Accuracy in Fundus Image Interpretation by Novice Versus Experienced Eye Care Practitioners</title>
	<link>https://www.mdpi.com/1995-8692/19/1/11</link>
	<description>Visual search behavior, influenced by expertise, prior knowledge, training, and visual fatigue, is crucial in ophthalmic diagnostics. This study investigates differences in eye-tracking strategies between novice and experienced eye care practitioners during fundus image interpretation. Forty-seven participants, including 37 novices (first- to fourth-year optometry students) and 10 experienced optometrists (&amp;ge;2 years of experience), viewed 20 fundus images (10 normal, 10 abnormal) while their eye movements were recorded using an EyeLink 1000 Plus gaze tracker (2000 Hz). Diagnostic and laterality accuracy were assessed, and statistical analyses were conducted using SigmaPlot 12.0. Results showed that experienced practitioners had significantly higher diagnostic accuracy (83 &amp;plusmn; 6.3%) than novices (70 &amp;plusmn; 12.9%, p &amp;lt; 0.005). Significant differences in oculomotor behavior were observed, including median latency (p &amp;lt; 0.001), while no significant differences were found in median peak velocity (p = 0.11) or laterality accuracy (p = 0.97). Diagnostic accuracy correlated with fixation count in novices (r = 0.54, p &amp;lt; 0.001), while laterality accuracy correlated with total dwelling time (r = &amp;minus;0.62, p &amp;lt; 0.005). The experienced practitioners demonstrated systematic and focused visual search patterns, whereas the novices exhibited unorganized scan paths. Enhancing training with visual feedback could improve fundus image analysis accuracy in novice clinicians.</description>
	<pubDate>2026-01-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 11: Inspecting the Retina: Oculomotor Patterns and Accuracy in Fundus Image Interpretation by Novice Versus Experienced Eye Care Practitioners</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/11">doi: 10.3390/jemr19010011</a></p>
	<p>Authors:
		Suraj Upadhyaya
		</p>
	<p>Visual search behavior, influenced by expertise, prior knowledge, training, and visual fatigue, is crucial in ophthalmic diagnostics. This study investigates differences in eye-tracking strategies between novice and experienced eye care practitioners during fundus image interpretation. Forty-seven participants, including 37 novices (first- to fourth-year optometry students) and 10 experienced optometrists (&ge;2 years of experience), viewed 20 fundus images (10 normal, 10 abnormal) while their eye movements were recorded using an EyeLink 1000 Plus gaze tracker (2000 Hz). Diagnostic and laterality accuracy were assessed, and statistical analyses were conducted using SigmaPlot 12.0. Results showed that experienced practitioners had significantly higher diagnostic accuracy (83 &plusmn; 6.3%) than novices (70 &plusmn; 12.9%, p &lt; 0.005). Significant differences in oculomotor behavior were observed, including median latency (p &lt; 0.001), while no significant differences were found in median peak velocity (p = 0.11) or laterality accuracy (p = 0.97). Diagnostic accuracy correlated with fixation count in novices (r = 0.54, p &lt; 0.001), while laterality accuracy correlated with total dwelling time (r = &minus;0.62, p &lt; 0.005). The experienced practitioners demonstrated systematic and focused visual search patterns, whereas the novices exhibited unorganized scan paths. Enhancing training with visual feedback could improve fundus image analysis accuracy in novice clinicians.</p>
	]]></content:encoded>

	<dc:title>Inspecting the Retina: Oculomotor Patterns and Accuracy in Fundus Image Interpretation by Novice Versus Experienced Eye Care Practitioners</dc:title>
			<dc:creator>Suraj Upadhyaya</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010011</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-21</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-21</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/jemr19010011</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/11</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/10">

	<title>JEMR, Vol. 19, Pages 10: Eye Movement Analysis: A Kernel Density Estimation Approach for Saccade Direction and Amplitude</title>
	<link>https://www.mdpi.com/1995-8692/19/1/10</link>
	<description>Eye movements are important indicators of problem-solving or solution strategies and are recorded using eye-tracking technologies. As they reveal how viewers interact with presented information during task processing, their analysis is crucial for educational research. Traditional methods for analyzing saccades, such as histograms or polar diagrams, are limited in capturing patterns in direction and amplitude. To address this, we propose a kernel density estimation approach that explicitly accounts for the data structure: for the circular distribution of saccade direction, we use the von Mises kernel, and for saccade amplitude, a Gaussian kernel. This yields continuous probability distributions that not only improve the accuracy of representations but also model the underlying distribution of eye movements. This method enables the identification of strategies used during task processing and reveals connections to the underlying cognitive processes. It allows for a deeper understanding of information processing during learning. By applying our new method to an empirical dataset, we uncovered differences in solution strategies that conventional techniques could not reveal. The insights gained can contribute to the development of more effective teaching methods, better tailored to the individual needs of learners, thereby enhancing their academic success.</description>
	<pubDate>2026-01-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 10: Eye Movement Analysis: A Kernel Density Estimation Approach for Saccade Direction and Amplitude</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/10">doi: 10.3390/jemr19010010</a></p>
	<p>Authors:
		Paula Fehlinger
		Bernhard Ertl
		Bianca Watzka
		</p>
	<p>Eye movements are important indicators of problem-solving or solution strategies and are recorded using eye-tracking technologies. As they reveal how viewers interact with presented information during task processing, their analysis is crucial for educational research. Traditional methods for analyzing saccades, such as histograms or polar diagrams, are limited in capturing patterns in direction and amplitude. To address this, we propose a kernel density estimation approach that explicitly accounts for the data structure: for the circular distribution of saccade direction, we use the von Mises kernel, and for saccade amplitude, a Gaussian kernel. This yields continuous probability distributions that not only improve the accuracy of representations but also model the underlying distribution of eye movements. This method enables the identification of strategies used during task processing and reveals connections to the underlying cognitive processes. It allows for a deeper understanding of information processing during learning. By applying our new method to an empirical dataset, we uncovered differences in solution strategies that conventional techniques could not reveal. The insights gained can contribute to the development of more effective teaching methods, better tailored to the individual needs of learners, thereby enhancing their academic success.</p>
	]]></content:encoded>

	<dc:title>Eye Movement Analysis: A Kernel Density Estimation Approach for Saccade Direction and Amplitude</dc:title>
			<dc:creator>Paula Fehlinger</dc:creator>
			<dc:creator>Bernhard Ertl</dc:creator>
			<dc:creator>Bianca Watzka</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010010</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-19</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-19</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/jemr19010010</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/10</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/9">

	<title>JEMR, Vol. 19, Pages 9: Advanced Consumer Behaviour Analysis: Integrating Eye Tracking, Machine Learning, and Facial Recognition</title>
	<link>https://www.mdpi.com/1995-8692/19/1/9</link>
	<description>This study presents DeepVisionAnalytics, an integrated framework that combines eye tracking, OpenCV-based computer vision (CV), and machine learning (ML) to support objective analysis of consumer behaviour in visually driven tasks. Unlike conventional self-reported surveys, which are prone to cognitive bias, recall errors, and social desirability effects, the proposed approach relies on direct behavioural measurements of visual attention. The system captures gaze distribution and fixation dynamics during interaction with products or interfaces. It uses AOI-level eye tracking metrics as the sole behavioural signal to infer candidate choice under constrained experimental conditions. In parallel, OpenCV and ML perform facial analysis to estimate demographic attributes (age, gender, and ethnicity). These attributes are collected independently and linked post hoc to gaze-derived outcomes. Demographics are not used as predictive features for choice inference. Instead, they are used as contextual metadata to support stratified, segment-level interpretation. Empirical results show that gaze-based inference closely reproduces observed choice distributions in short-horizon, visually driven tasks. Demographic estimates enable meaningful post hoc segmentation without affecting the decision mechanism. Together, these results show that multimodal integration can move beyond descriptive heatmaps. The platform produces reproducible decision-support artefacts, including AOI rankings, heatmaps, and segment-level summaries, grounded in objective behavioural data. By separating the decision signal (gaze) from contextual descriptors (demographics), this work contributes a reusable end-to-end platform for marketing and UX research. It supports choice inference under constrained conditions and segment-level interpretation without demographic priors in the decision mechanism.</description>
	<pubDate>2026-01-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 9: Advanced Consumer Behaviour Analysis: Integrating Eye Tracking, Machine Learning, and Facial Recognition</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/9">doi: 10.3390/jemr19010009</a></p>
	<p>Authors:
		José Augusto Rodrigues
		António Vieira de Castro
		Martín Llamas-Nistal
		</p>
	<p>This study presents DeepVisionAnalytics, an integrated framework that combines eye tracking, OpenCV-based computer vision (CV), and machine learning (ML) to support objective analysis of consumer behaviour in visually driven tasks. Unlike conventional self-reported surveys, which are prone to cognitive bias, recall errors, and social desirability effects, the proposed approach relies on direct behavioural measurements of visual attention. The system captures gaze distribution and fixation dynamics during interaction with products or interfaces. It uses AOI-level eye tracking metrics as the sole behavioural signal to infer candidate choice under constrained experimental conditions. In parallel, OpenCV and ML perform facial analysis to estimate demographic attributes (age, gender, and ethnicity). These attributes are collected independently and linked post hoc to gaze-derived outcomes. Demographics are not used as predictive features for choice inference. Instead, they are used as contextual metadata to support stratified, segment-level interpretation. Empirical results show that gaze-based inference closely reproduces observed choice distributions in short-horizon, visually driven tasks. Demographic estimates enable meaningful post hoc segmentation without affecting the decision mechanism. Together, these results show that multimodal integration can move beyond descriptive heatmaps. The platform produces reproducible decision-support artefacts, including AOI rankings, heatmaps, and segment-level summaries, grounded in objective behavioural data. By separating the decision signal (gaze) from contextual descriptors (demographics), this work contributes a reusable end-to-end platform for marketing and UX research. It supports choice inference under constrained conditions and segment-level interpretation without demographic priors in the decision mechanism.</p>
	]]></content:encoded>

	<dc:title>Advanced Consumer Behaviour Analysis: Integrating Eye Tracking, Machine Learning, and Facial Recognition</dc:title>
			<dc:creator>José Augusto Rodrigues</dc:creator>
			<dc:creator>António Vieira de Castro</dc:creator>
			<dc:creator>Martín Llamas-Nistal</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010009</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-19</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-19</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/jemr19010009</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/9</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/8">

	<title>JEMR, Vol. 19, Pages 8: Analysis of Top-Down Perceptual Modulation Considering Eye Fixations Made on a Bistable Logo</title>
	<link>https://www.mdpi.com/1995-8692/19/1/8</link>
	<description>Within the framework of brand communication, several companies choose to use bistable logos. These types of logos fall within the mechanisms inherent to bistable perception, where the interpretation of the two possible percepts involved may depend on the areas being observed or on prior instructions given to the observer to search for a particular shape within the ambiguous image. Perceptual factors related to the stimulus and the areas of eye fixation are called bottom-up aspects. The information exogenous to the bistable stimulus that determines perception is called top-down modulation. In order to determine whether certain bottom-up perceptual modulation areas for the Toblerone bistable logo are related to the search for each percept previously modulated by a written instruction, an experimental task was carried out with 34 participants using a Tobii T-120 eye tracker device, manufactured by Tobii in Danderyd, Sweden. Seven bottom-up modulation clusters were analyzed for ocular responses manifested in two different top-down modulation conditions. The results show that for each of the percepts, some areas correspond to the textual information offered as a top-down modulator. It is concluded that for the perception of the Toblerone® logo, some areas are related to each percept, and the unimodal top-down modulation mechanisms operate in certain areas, while others can be assumed to be parts of the logo that contribute to the recognition of the two percepts involved.</description>
	<pubDate>2026-01-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 8: Analysis of Top-Down Perceptual Modulation Considering Eye Fixations Made on a Bistable Logo</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/8">doi: 10.3390/jemr19010008</a></p>
	<p>Authors:
		Guillermo Rodríguez-Martínez
		Juan Camilo Giraldo-Aristizábal
		</p>
	<p>Within the framework of brand communication, several companies choose to use bistable logos. These types of logos fall within the mechanisms inherent to bistable perception, where the interpretation of the two possible percepts involved may depend on the areas being observed or on prior instructions given to the observer to search for a particular shape within the ambiguous image. Perceptual factors related to the stimulus and the areas of eye fixation are called bottom-up aspects. The information exogenous to the bistable stimulus that determines perception is called top-down modulation. In order to determine whether certain bottom-up perceptual modulation areas for the Toblerone bistable logo are related to the search for each percept previously modulated by a written instruction, an experimental task was carried out with 34 participants using a Tobii T-120 eye tracker device, manufactured by Tobii in Danderyd, Sweden. Seven bottom-up modulation clusters were analyzed for ocular responses manifested in two different top-down modulation conditions. The results show that for each of the percepts, some areas correspond to the textual information offered as a top-down modulator. It is concluded that for the perception of the Toblerone® logo, some areas are related to each percept, and the unimodal top-down modulation mechanisms operate in certain areas, while others can be assumed to be parts of the logo that contribute to the recognition of the two percepts involved.</p>
	]]></content:encoded>

	<dc:title>Analysis of Top-Down Perceptual Modulation Considering Eye Fixations Made on a Bistable Logo</dc:title>
			<dc:creator>Guillermo Rodríguez-Martínez</dc:creator>
			<dc:creator>Juan Camilo Giraldo-Aristizábal</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010008</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-14</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-14</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/jemr19010008</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/8</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/7">

	<title>JEMR, Vol. 19, Pages 7: Analyzing the “Opposite” Approach in Additions to Historic Buildings Using Visual Attention Tools: Dresden Military History Museum Case</title>
	<link>https://www.mdpi.com/1995-8692/19/1/7</link>
	<description>From past to present, modern additions have continued to transform historic environments. While some argue that contemporary extensions disrupt the integrity of historic buildings, others suggest that the contrast between past and present creates a meaningful architectural dialog. This debate raises a key question: in contrasting compositions, which architectural elements draw more visual attention, the historic or the modern? To address this, a visual attention-based analytical approach is adopted. In this study, eye-tracking-based visual attention analysis is used to examine how viewers perceive the relationship between historical and contemporary architectural elements. Instead of conventional laboratory-based eye-tracking, artificial intelligence-supported visual attention software developed from eye-tracking datasets is employed. Four tools—3M-VAS, EyeQuant, Attention Insight, and Expoze—were used to generate heat maps, gaze sequence maps, hotspots, focus maps, attention distribution diagrams, and saliency predictions. These visualizations enabled both a qualitative and quantitative comparison of viewer focus. The case study is the Military History Museum in Dresden, Germany, known for its widely debated contemporary addition representing an oppositional design approach. The results illustrate which architectural components are visually prioritized, offering insight into how contrasting architectural languages are cognitively perceived in historic settings.</description>
	<pubDate>2026-01-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 7: Analyzing the “Opposite” Approach in Additions to Historic Buildings Using Visual Attention Tools: Dresden Military History Museum Case</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/7">doi: 10.3390/jemr19010007</a></p>
	<p>Authors:
		Nuray Özkaraca Özalp
		Hicran Halaç
		Mehmet Özalp
		Fikret Bademci
		</p>
	<p>From past to present, modern additions have continued to transform historic environments. While some argue that contemporary extensions disrupt the integrity of historic buildings, others suggest that the contrast between past and present creates a meaningful architectural dialog. This debate raises a key question: in contrasting compositions, which architectural elements draw more visual attention, the historic or the modern? To address this, a visual attention-based analytical approach is adopted. In this study, eye-tracking-based visual attention analysis is used to examine how viewers perceive the relationship between historical and contemporary architectural elements. Instead of conventional laboratory-based eye-tracking, artificial intelligence-supported visual attention software developed from eye-tracking datasets is employed. Four tools—3M-VAS, EyeQuant, Attention Insight, and Expoze—were used to generate heat maps, gaze sequence maps, hotspots, focus maps, attention distribution diagrams, and saliency predictions. These visualizations enabled both a qualitative and quantitative comparison of viewer focus. The case study is the Military History Museum in Dresden, Germany, known for its widely debated contemporary addition representing an oppositional design approach. The results illustrate which architectural components are visually prioritized, offering insight into how contrasting architectural languages are cognitively perceived in historic settings.</p>
	]]></content:encoded>

	<dc:title>Analyzing the “Opposite” Approach in Additions to Historic Buildings Using Visual Attention Tools: Dresden Military History Museum Case</dc:title>
			<dc:creator>Nuray Özkaraca Özalp</dc:creator>
			<dc:creator>Hicran Halaç</dc:creator>
			<dc:creator>Mehmet Özalp</dc:creator>
			<dc:creator>Fikret Bademci</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010007</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-12</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/jemr19010007</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/7</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/6">

	<title>JEMR, Vol. 19, Pages 6: Spanish Readers Skip Articles Regardless of Gender and Number Agreement</title>
	<link>https://www.mdpi.com/1995-8692/19/1/6</link>
	<description>Articles are among the most frequently encountered words during reading; however, it is not clear how deeply they are usually processed. This study examines whether native Spanish speakers use parafoveal article–noun agreement information to guide eye movements during reading. Using the gaze-contingent boundary paradigm, we manipulated the parafoveal preview of articles across two experiments. In Experiment 1, we manipulated gender agreement between the previews readers received of definite articles and the subsequent nouns (e.g., la mesa vs. el* mesa). In Experiment 2, we manipulated grammatical gender and number agreement between parafoveal article previews and the subsequent nouns jointly (e.g., los* mesa vs. una mesa). We found no evidence that parafoveal article–noun gender or number agreement affected article skipping probability, suggesting that initial parafoveal processing of articles does not extend to their grammatical properties. However, we observed increased total viewing time on the noun following mismatching previews, suggesting that, while the decision of whether to skip an article is taken largely without considering the grammatical properties of the upcoming words, readers do need more time to recover from the grammatical mismatch afterwards. We discuss the results in the context of current models of eye-movement control during reading.</description>
	<pubDate>2026-01-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 6: Spanish Readers Skip Articles Regardless of Gender and Number Agreement</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/6">doi: 10.3390/jemr19010006</a></p>
	<p>Authors:
		Marina Serrano-Carot
		Bernhard Angele
		</p>
	<p>Articles are among the most frequently encountered words during reading; however, it is not clear how deeply they are usually processed. This study examines whether native Spanish speakers use parafoveal article–noun agreement information to guide eye movements during reading. Using the gaze-contingent boundary paradigm, we manipulated the parafoveal preview of articles across two experiments. In Experiment 1, we manipulated gender agreement between the previews readers received of definite articles and the subsequent nouns (e.g., la mesa vs. el* mesa). In Experiment 2, we manipulated grammatical gender and number agreement between parafoveal article previews and the subsequent nouns jointly (e.g., los* mesa vs. una mesa). We found no evidence that parafoveal article–noun gender or number agreement affected article skipping probability, suggesting that initial parafoveal processing of articles does not extend to their grammatical properties. However, we observed increased total viewing time on the noun following mismatching previews, suggesting that, while the decision of whether to skip an article is taken largely without considering the grammatical properties of the upcoming words, readers do need more time to recover from the grammatical mismatch afterwards. We discuss the results in the context of current models of eye-movement control during reading.</p>
	]]></content:encoded>

	<dc:title>Spanish Readers Skip Articles Regardless of Gender and Number Agreement</dc:title>
			<dc:creator>Marina Serrano-Carot</dc:creator>
			<dc:creator>Bernhard Angele</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010006</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-09</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-09</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/jemr19010006</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/6</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/5">

	<title>JEMR, Vol. 19, Pages 5: Visual Strategies of Avoidantly Attached Individuals: Attachment Avoidance and Gaze Behavior in Deceptive Interactions</title>
	<link>https://www.mdpi.com/1995-8692/19/1/5</link>
	<description>Gaze behavior is a critical component of social interaction, reflecting emotional recognition and social regulation. While previous research has emphasized either situational influences (e.g., deception) or stable individual differences (e.g., attachment avoidance) on gaze patterns, studies exploring how these factors interact to shape gaze behavior in interpersonal contexts remain scarce. In this vein, the aim of the present study was to experimentally determine whether the gaze direction of individuals differs, with respect to their avoidant orientation, under changing situational conditions, including truthful and deceptive communication towards a counterpart. Using a within-person experimental design and the eye-tracking methodology, 31 participants took part in both rehearsed and spontaneous truth-telling and lie-telling tasks. Consistent with expectations, higher attachment avoidance was associated with significantly fewer fixations on emotionally expressive facial regions (e.g., mouth, jaw), and non-significant but visually consistent increases in fixations on the upper face (e.g., eyes) and background. These findings indicate that stable dispositional tendencies, rather than situational demands such as deception, predominantly shape gaze allocation during interpersonal interactions. They further provide a foundation for future investigations into the dynamic interplay between personality and situational context in interactive communicative settings.</description>
	<pubDate>2026-01-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 5: Visual Strategies of Avoidantly Attached Individuals: Attachment Avoidance and Gaze Behavior in Deceptive Interactions</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/5">doi: 10.3390/jemr19010005</a></p>
	<p>Authors:
		Petra Hypšová
		Martin Seitl
		Stanislav Popelka
		</p>
	<p>Gaze behavior is a critical component of social interaction, reflecting emotional recognition and social regulation. While previous research has emphasized either situational influences (e.g., deception) or stable individual differences (e.g., attachment avoidance) on gaze patterns, studies exploring how these factors interact to shape gaze behavior in interpersonal contexts remain scarce. In this vein, the aim of the present study was to experimentally determine whether the gaze direction of individuals differs, with respect to their avoidant orientation, under changing situational conditions, including truthful and deceptive communication towards a counterpart. Using a within-person experimental design and the eye-tracking methodology, 31 participants took part in both rehearsed and spontaneous truth-telling and lie-telling tasks. Consistent with expectations, higher attachment avoidance was associated with significantly fewer fixations on emotionally expressive facial regions (e.g., mouth, jaw), and non-significant but visually consistent increases in fixations on the upper face (e.g., eyes) and background. These findings indicate that stable dispositional tendencies, rather than situational demands such as deception, predominantly shape gaze allocation during interpersonal interactions. They further provide a foundation for future investigations into the dynamic interplay between personality and situational context in interactive communicative settings.</p>
	]]></content:encoded>

	<dc:title>Visual Strategies of Avoidantly Attached Individuals: Attachment Avoidance and Gaze Behavior in Deceptive Interactions</dc:title>
			<dc:creator>Petra Hypšová</dc:creator>
			<dc:creator>Martin Seitl</dc:creator>
			<dc:creator>Stanislav Popelka</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010005</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2026-01-07</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2026-01-07</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/jemr19010005</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/5</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/4">

	<title>JEMR, Vol. 19, Pages 4: The Impact of Ophthalmic Lens Power and Treatments on Eye Tracking Performance</title>
	<link>https://www.mdpi.com/1995-8692/19/1/4</link>
	<description>Eye tracking (ET) technology is increasingly used in both research and clinical practice, but its accuracy may be compromised by the presence of ophthalmic lenses. This study systematically evaluated the influence of different optical prescriptions and lens treatments on ET performance using DIVE (Device for an Integral Visual Examination). Fourteen healthy participants underwent oculomotor control tests under thirteen optical conditions: six with varying dioptric powers and six with optical filters, compared against a no-lens control. Key parameters analysed included angle error, fixation stability (bivariate contour ellipse area, BCEA), saccadic accuracy, number of data gaps, and proportion of valid frames. High-powered spherical lenses (+6.00 D and −6.00 D) significantly increased gaze angle error, and the negative lens also increased data gaps, while cylindrical lenses had a moderate effect. Among filters, the Natural IR coating caused the greatest deterioration in ET performance, reducing valid samples and increasing the number of gaps with data loss, likely due to interference with the infrared-based detection system. The lens with a basic anti-reflective treatment (SV Org 1.5 AR) also showed some deterioration in ET performance. Other filters showed minimal or no significant impact. These findings demonstrate that both high-powered prescriptions and certain lens treatments can compromise ET data quality, highlighting the importance of accounting for optical conditions in experimental design and clinical applications.</description>
	<pubDate>2025-12-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 4: The Impact of Ophthalmic Lens Power and Treatments on Eye Tracking Performance</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/4">doi: 10.3390/jemr19010004</a></p>
	<p>Authors:
		Marta Lacort-Beltrán
		Adrián Alejandre
		Sara Guillén
		Marina Vilella
		Xian Pan
		Victoria Pueyo
		Marta Ortin
		Eduardo Esteban-Ibañez
		</p>
	<p>Eye tracking (ET) technology is increasingly used in both research and clinical practice, but its accuracy may be compromised by the presence of ophthalmic lenses. This study systematically evaluated the influence of different optical prescriptions and lens treatments on ET performance using DIVE (Device for an Integral Visual Examination). Fourteen healthy participants underwent oculomotor control tests under thirteen optical conditions: six with varying dioptric powers and six with optical filters, compared against a no-lens control. Key parameters analysed included angle error, fixation stability (bivariate contour ellipse area, BCEA), saccadic accuracy, number of data gaps, and proportion of valid frames. High-powered spherical lenses (+6.00 D and −6.00 D) significantly increased gaze angle error, and the negative lens also increased data gaps, while cylindrical lenses had a moderate effect. Among filters, the Natural IR coating caused the greatest deterioration in ET performance, reducing valid samples and increasing the number of gaps with data loss, likely due to interference with the infrared-based detection system. The lens with a basic anti-reflective treatment (SV Org 1.5 AR) also showed some deterioration in ET performance. Other filters showed minimal or no significant impact. These findings demonstrate that both high-powered prescriptions and certain lens treatments can compromise ET data quality, highlighting the importance of accounting for optical conditions in experimental design and clinical applications.</p>
	]]></content:encoded>

	<dc:title>The Impact of Ophthalmic Lens Power and Treatments on Eye Tracking Performance</dc:title>
			<dc:creator>Marta Lacort-Beltrán</dc:creator>
			<dc:creator>Adrián Alejandre</dc:creator>
			<dc:creator>Sara Guillén</dc:creator>
			<dc:creator>Marina Vilella</dc:creator>
			<dc:creator>Xian Pan</dc:creator>
			<dc:creator>Victoria Pueyo</dc:creator>
			<dc:creator>Marta Ortin</dc:creator>
			<dc:creator>Eduardo Esteban-Ibañez</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010004</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-29</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-29</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/jemr19010004</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/4</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/3">

	<title>JEMR, Vol. 19, Pages 3: Reading Music or Reading Notes? Rethinking Musical Stimuli in Eye-Movement Research</title>
	<link>https://www.mdpi.com/1995-8692/19/1/3</link>
	<description>This article examines the nature of musical stimuli used in eye-movement research on music reading, with a focus on syntactic elements essential for fluent reading: melody, rhythm, and harmony. Drawing parallels between language and music as syntactic systems, the study critiques the widespread use of stimuli that lack coherent musical structure, such as random pitch sequences or rhythmically ambiguous patterns. Eight peer-reviewed studies were analyzed based on their use of stimuli specifically composed for research purposes. The findings reveal that most stimuli do not reflect authentic musical syntax, limiting the validity of conclusions about music reading processes. The article also explores how researchers interpret the concept of “complexity” in musical stimuli, noting inconsistencies and a lack of standardized criteria. Additionally, it highlights the importance of considering motor planning and instrument-specific challenges, which are often overlooked in experimental design. The study calls for more deliberate and informed stimulus design in future research, emphasizing the need for syntactically meaningful musical excerpts and standardized definitions of complexity. Such improvements are essential for advancing the understanding of syntactic processing in music reading and ensuring methodological consistency across studies.</description>
	<pubDate>2025-12-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 3: Reading Music or Reading Notes? Rethinking Musical Stimuli in Eye-Movement Research</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/3">doi: 10.3390/jemr19010003</a></p>
	<p>Authors:
		Katarzyna Leikvoll
		</p>
	<p>This article examines the nature of musical stimuli used in eye-movement research on music reading, with a focus on syntactic elements essential for fluent reading: melody, rhythm, and harmony. Drawing parallels between language and music as syntactic systems, the study critiques the widespread use of stimuli that lack coherent musical structure, such as random pitch sequences or rhythmically ambiguous patterns. Eight peer-reviewed studies were analyzed based on their use of stimuli specifically composed for research purposes. The findings reveal that most stimuli do not reflect authentic musical syntax, limiting the validity of conclusions about music reading processes. The article also explores how researchers interpret the concept of “complexity” in musical stimuli, noting inconsistencies and a lack of standardized criteria. Additionally, it highlights the importance of considering motor planning and instrument-specific challenges, which are often overlooked in experimental design. The study calls for more deliberate and informed stimulus design in future research, emphasizing the need for syntactically meaningful musical excerpts and standardized definitions of complexity. Such improvements are essential for advancing the understanding of syntactic processing in music reading and ensuring methodological consistency across studies.</p>
	]]></content:encoded>

	<dc:title>Reading Music or Reading Notes? Rethinking Musical Stimuli in Eye-Movement Research</dc:title>
			<dc:creator>Katarzyna Leikvoll</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010003</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-29</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-29</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/jemr19010003</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/3</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/2">

	<title>JEMR, Vol. 19, Pages 2: Comparing Eye-Tracking and Verbal Reports in L2 Reading Process Research: Three Qualitative Studies</title>
	<link>https://www.mdpi.com/1995-8692/19/1/2</link>
	<description>This study compares the roles of eye-tracking and verbal reports (think-alouds and retrospective verbal reports, RVRs) in L2 reading process research through three qualitative studies. Findings indicate that eye-tracking provided precise, quantitative data on visual attention and reading patterns (e.g., fixation duration, gaze plots) and choice-making during gap-filling. Based on our mapping, it was mostly effective in identifying 13 out of 47 reading processing strategies, primarily those involving skimming or scanning that had distinctive eye-movement signatures. Verbal reports, while less exact in measurement, offered direct access to cognitive processes (e.g., strategy use, reasoning) and uncovered content-specific thoughts inaccessible to eye-tracking. Both methods exhibited reactivity: eye-tracking could cause physical discomfort or altered reading behavior, whereas think-alouds could disrupt task flow or enhance reflection. This study reveals the respective strengths and limitations of eye-tracking and verbal reports in L2 reading research. It facilitates a more informed selection and application of these methodological approaches in alignment with specific research objectives, whether employed in isolation or in an integrated manner.</description>
	<pubDate>2025-12-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 2: Comparing Eye-Tracking and Verbal Reports in L2 Reading Process Research: Three Qualitative Studies</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/2">doi: 10.3390/jemr19010002</a></p>
	<p>Authors:
		Chengsong Yang
		Guangwei Hu
		Keyu Que
		Na Fan
		</p>
	<p>This study compares the roles of eye-tracking and verbal reports (think-alouds and retrospective verbal reports, RVRs) in L2 reading process research through three qualitative studies. Findings indicate that eye-tracking provided precise, quantitative data on visual attention and reading patterns (e.g., fixation duration, gaze plots) and choice-making during gap-filling. Based on our mapping, it was mostly effective in identifying 13 out of 47 reading processing strategies, primarily those involving skimming or scanning that had distinctive eye-movement signatures. Verbal reports, while less exact in measurement, offered direct access to cognitive processes (e.g., strategy use, reasoning) and uncovered content-specific thoughts inaccessible to eye-tracking. Both methods exhibited reactivity: eye-tracking could cause physical discomfort or altered reading behavior, whereas think-alouds could disrupt task flow or enhance reflection. This study reveals the respective strengths and limitations of eye-tracking and verbal reports in L2 reading research. It facilitates a more informed selection and application of these methodological approaches in alignment with specific research objectives, whether employed in isolation or in an integrated manner.</p>
	]]></content:encoded>

	<dc:title>Comparing Eye-Tracking and Verbal Reports in L2 Reading Process Research: Three Qualitative Studies</dc:title>
			<dc:creator>Chengsong Yang</dc:creator>
			<dc:creator>Guangwei Hu</dc:creator>
			<dc:creator>Keyu Que</dc:creator>
			<dc:creator>Na Fan</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010002</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-25</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-25</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/jemr19010002</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/2</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/19/1/1">

	<title>JEMR, Vol. 19, Pages 1: Discriminative Capabilities of Eye Gaze Measures for Cognitive Load Evaluation in a Driving Simulation Task</title>
	<link>https://www.mdpi.com/1995-8692/19/1/1</link>
	<description>Driving is a cognitively demanding task engaging attentional effort and working memory resources, which increases cognitive load. The aim of this study was to evaluate the discriminative capabilities of an objective eye tracking method in comparison to a subjective self-report scale (the NASA–Task Load Index) in distinguishing cognitive load levels during driving. Participants (N = 685) performed highway and urban driving in a fixed-base driving simulator. The N-Back test was used as a secondary task to increase cognitive load. In line with previous studies, the NASA–Task Load Index was shown to be an accurate self-report tool in distinguishing conditions with higher and lower levels of cognitive load due to the additional N-Back task, with a best average accuracy of 0.81 within the highway driving scenario. Eye gaze metrics worked best when differentiating between stages of highway and urban driving, with an average accuracy of 0.82. Eye gaze entropy measures were the best indicators for cognitive load dynamics, with average accuracy reaching 0.95 for gaze transition entropy in the urban vs. highway comparison. Eye gaze metrics showed significant correlations with the NASA–Task Load Index results in urban driving stages, but not in highway driving. The results demonstrate that eye gaze metrics can be used in combination with self-reports for developing algorithms for cognitive load evaluation and reliable driver state prediction in different road conditions.</description>
	<pubDate>2025-12-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 19, Pages 1: Discriminative Capabilities of Eye Gaze Measures for Cognitive Load Evaluation in a Driving Simulation Task</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/19/1/1">doi: 10.3390/jemr19010001</a></p>
	<p>Authors:
		Anastasiia Bakhchina
		Karina Arutyunova
		Evgenii Burashnikov
		Anastasiya Filatova
		Andrei Filimonov
		Ivan Shishalov
		</p>
	<p>Driving is a cognitively demanding task engaging attentional effort and working memory resources, which increases cognitive load. The aim of this study was to evaluate the discriminative capabilities of an objective eye tracking method in comparison to a subjective self-report scale (the NASA–Task Load Index) in distinguishing cognitive load levels during driving. Participants (N = 685) performed highway and urban driving in a fixed-base driving simulator. The N-Back test was used as a secondary task to increase cognitive load. In line with previous studies, the NASA–Task Load Index was shown to be an accurate self-report tool in distinguishing conditions with higher and lower levels of cognitive load due to the additional N-Back task, with a best average accuracy of 0.81 within the highway driving scenario. Eye gaze metrics worked best when differentiating between stages of highway and urban driving, with an average accuracy of 0.82. Eye gaze entropy measures were the best indicators for cognitive load dynamics, with average accuracy reaching 0.95 for gaze transition entropy in the urban vs. highway comparison. Eye gaze metrics showed significant correlations with the NASA–Task Load Index results in urban driving stages, but not in highway driving. The results demonstrate that eye gaze metrics can be used in combination with self-reports for developing algorithms for cognitive load evaluation and reliable driver state prediction in different road conditions.</p>
	]]></content:encoded>

	<dc:title>Discriminative Capabilities of Eye Gaze Measures for Cognitive Load Evaluation in a Driving Simulation Task</dc:title>
			<dc:creator>Anastasiia Bakhchina</dc:creator>
			<dc:creator>Karina Arutyunova</dc:creator>
			<dc:creator>Evgenii Burashnikov</dc:creator>
			<dc:creator>Anastasiya Filatova</dc:creator>
			<dc:creator>Andrei Filimonov</dc:creator>
			<dc:creator>Ivan Shishalov</dc:creator>
		<dc:identifier>doi: 10.3390/jemr19010001</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-24</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-24</prism:publicationDate>
	<prism:volume>19</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/jemr19010001</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/19/1/1</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/77">

	<title>JEMR, Vol. 18, Pages 77: Stimulus Center Bias Persists Irrespective of Its Position on the Display</title>
	<link>https://www.mdpi.com/1995-8692/18/6/77</link>
	<description>Since the earliest studies on human eye movements, it has been repeatedly demonstrated that observers fixate the center of visual stimuli more than their periphery, regardless of visual content. Subsequent research suggested only a small effect of typical biases in experimental setups, such as the observer’s position relative to the screen or the relative location of the cue marker. While comparative studies of the screen center vs. stimulus center revealed that both conspire in the process, much of the prior art is still confounded by experimental details that leave the origins of the center bias debatable. We thus propose methodological novelties to rigorously test the effect of the stimulus center, isolated from other factors. In particular, eye movements were tracked in a free-viewing experiment in which stimuli were presented at a wide range of horizontal displacements from a counterbalanced cue marker in a wide visual field. Stimuli spanned diverse natural scene images to allow inherent biases to surface in the pooled data. Various analyses of the first few fixations show a robust bias toward the center of the stimulus, independent of its position on the display, but affected by its distance to the cue marker. Center bias is thus a tangible phenomenon related to the stimulus.</description>
	<pubDate>2025-12-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 77: Stimulus Center Bias Persists Irrespective of Its Position on the Display</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/77">doi: 10.3390/jemr18060077</a></p>
	<p>Authors:
		Rotem Mairon
		Ohad Ben-Shahar
		</p>
	<p>Since the earliest studies on human eye movements, it has been repeatedly demonstrated that observers fixate the center of visual stimuli more than their periphery, regardless of visual content. Subsequent research suggested only a small effect of typical biases in experimental setups, such as the observer’s position relative to the screen or the relative location of the cue marker. While comparative studies of the screen center vs. stimulus center revealed that both conspire in the process, much of the prior art is still confounded by experimental details that leave the origins of the center bias debatable. We thus propose methodological novelties to rigorously test the effect of the stimulus center, isolated from other factors. In particular, eye movements were tracked in a free-viewing experiment in which stimuli were presented at a wide range of horizontal displacements from a counterbalanced cue marker in a wide visual field. Stimuli spanned diverse natural scene images to allow inherent biases to surface in the pooled data. Various analyses of the first few fixations show a robust bias toward the center of the stimulus, independent of its position on the display, but affected by its distance to the cue marker. Center bias is thus a tangible phenomenon related to the stimulus.</p>
	]]></content:encoded>

	<dc:title>Stimulus Center Bias Persists Irrespective of Its Position on the Display</dc:title>
			<dc:creator>Rotem Mairon</dc:creator>
			<dc:creator>Ohad Ben-Shahar</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060077</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-16</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-16</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>77</prism:startingPage>
		<prism:doi>10.3390/jemr18060077</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/77</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/76">

	<title>JEMR, Vol. 18, Pages 76: The Role of Spontaneous Eye Blinks in Temporal Perception: An Eye Tracking Study</title>
	<link>https://www.mdpi.com/1995-8692/18/6/76</link>
	<description>Our interaction with the world depends on our ability to process temporal information, which is a key component of human cognition that directly impacts decision-making, planning, and prediction of events. Visual information plays a crucial role in shaping our subjective perception of time, and even brief interruptions, such as those caused by eye blinks, can disrupt the continuity of our perception and alter how we estimate durations. The purpose of this study is to investigate the relationship between spontaneous eye blinks and time perception using a temporal bisection task. In particular, we focus on how blinks preceding stimulus presentation impact the perceived duration of that stimulus. The results of fitting a generalized linear mixed-effects model revealed that blinking can indeed influence duration estimation. Specifically, the presence of a single blink before the stimulus presentation had a significant effect on subjective time perception; participants were more likely to categorize a duration as shorter compared to when they did not blink. In contrast, two or more blinks before stimulus presentation did not have a significant effect compared to not blinking. This study further elucidates the complex interaction between the momentary suppression of visual input and the perception of time.</description>
	<pubDate>2025-12-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 76: The Role of Spontaneous Eye Blinks in Temporal Perception: An Eye Tracking Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/76">doi: 10.3390/jemr18060076</a></p>
	<p>Authors:
		Domenica Abad-Malo
		Omar Alvarado-Cando
		Hakan Karsilar
		</p>
	<p>Our interaction with the world depends on our ability to process temporal information, which is a key component of human cognition that directly impacts decision-making, planning, and prediction of events. Visual information plays a crucial role in shaping our subjective perception of time, and even brief interruptions, such as those caused by eye blinks, can disrupt the continuity of our perception and alter how we estimate durations. The purpose of this study is to investigate the relationship between spontaneous eye blinks and time perception using a temporal bisection task. In particular, we focus on how blinks preceding stimulus presentation impact the perceived duration of that stimulus. The results of fitting a generalized linear mixed-effects model revealed that blinking can indeed influence duration estimation. Specifically, the presence of a single blink before the stimulus presentation had a significant effect on subjective time perception; participants were more likely to categorize a duration as shorter compared to when they did not blink. In contrast, two or more blinks before stimulus presentation did not have a significant effect compared to not blinking. This study further elucidates the complex interaction between the momentary suppression of visual input and the perception of time.</p>
	]]></content:encoded>

	<dc:title>The Role of Spontaneous Eye Blinks in Temporal Perception: An Eye Tracking Study</dc:title>
			<dc:creator>Domenica Abad-Malo</dc:creator>
			<dc:creator>Omar Alvarado-Cando</dc:creator>
			<dc:creator>Hakan Karsilar</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060076</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-16</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-16</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>76</prism:startingPage>
		<prism:doi>10.3390/jemr18060076</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/76</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/75">

	<title>JEMR, Vol. 18, Pages 75: Eyes on Prevention: An Eye-Tracking Analysis of Visual Attention Patterns in Breast Cancer Screening Ads</title>
	<link>https://www.mdpi.com/1995-8692/18/6/75</link>
	<description>Strong communication is central to the translation of breast cancer screening availability into uptake. This experiment tests the role of design features of screening advertisements in directing visual attention in screening-eligible women (≥40 years). To this end, a within-subjects eye-tracking experiment (N = 30) was conducted in which women viewed six static public service advertisements. Predefined Areas of Interest (AOIs)—Text, Image/Visual, Symbol, Logo, Website/CTA, and Source/Authority—were annotated, and three standard measures were calculated: Time to First Fixation (TTFF), Fixation Count (FC), and Fixation Duration (FD). Analyses combined descriptive summaries with subgroup analyses using nonparametric methods and generalized linear mixed models (GLMMs) employing participant-level random intercepts. Within each category of stimuli, detected differences were small in magnitude yet trended towards fewer revisits in each category for the FC measure; TTFF and FD showed no significant differences across categories. Viewing data from the perspective of AOIs highlighted pronounced individual differences. Narratives/efficacy text and dense icon/text callouts prolonged processing times, although institutional logos and abstract/anatomical symbols generally received brief treatment except when coupled with action-oriented communication triggers. TTFF results also aligned with a Scan-Then-Read strategy, in which smaller labels/sources/CTAs are fixated before larger headlines/statistical text. Practically, screening messages should co-locate access and credibility information in early-attention areas and employ brief, fluent efficacy text to hold gaze. The study adds PSA-specific eye-tracking evidence for breast cancer screening and provides immediately testable design recommendations for programs in Greece and the EU.</description>
	<pubDate>2025-12-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 75: Eyes on Prevention: An Eye-Tracking Analysis of Visual Attention Patterns in Breast Cancer Screening Ads</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/75">doi: 10.3390/jemr18060075</a></p>
	<p>Authors:
		Stefanos Balaskas
		Ioanna Yfantidou
		Dimitra Skandali
		</p>
	<p>Strong communication is central to the translation of breast cancer screening availability into uptake. This experiment tests the role of design features of screening advertisements in directing visual attention in screening-eligible women (≥40 years). To this end, a within-subjects eye-tracking experiment (N = 30) was conducted in which women viewed six static public service advertisements. Predefined Areas of Interest (AOIs) were annotated, covering Text, Image/Visual, Symbol, Logo, Website/CTA, and Source/Authority, and three standard measures were calculated: Time to First Fixation (TTFF), Fixation Count (FC), and Fixation Duration (FD). Analyses combined descriptive summaries with subgroup analyses using nonparametric methods and generalized linear mixed models (GLMMs) employing participant-level random intercepts. Within each category of stimuli, detected differences were small in magnitude yet trended towards fewer revisits in each category on the FC measure; TTFF and FD showed no significant differences across categories. Viewing the data from the perspective of AOIs highlighted pronounced individual differences. Narrative/efficacy text and dense icon/text callouts prolonged processing times, although institutional logos and abstract/anatomical symbols generally received brief treatment except when coupled with action-oriented communication triggers. TTFF also tended toward individual areas of interest aligned with the Scan-Then-Read strategy, in which smaller labels/sources/CTAs are exploited first in comparison with larger headlines/statistical text. Practically, screening messages should co-locate access and credibility information in early-attention areas and employ brief, fluent efficacy text to hold gaze. The study adds PSA-specific eye-tracking evidence for breast cancer screening and provides immediately testable design recommendations for programs in Greece and the EU.</p>
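	<p>For readers unfamiliar with the modeling approach, the following illustrative Python sketch (not the study's code; all column names and values are invented) fits a participant-level random-intercept model to fixation durations with statsmodels; note that MixedLM is a linear mixed model standing in for the GLMMs described above:</p>
	<pre><code># Hypothetical sketch: fixation duration by AOI category with a random
# intercept per participant.
import pandas as pd
import statsmodels.formula.api as smf

df = pd.DataFrame({
    "participant": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
    "aoi": ["Text", "Image", "Logo"] * 6,
    "fd_ms": [420, 310, 180, 505, 290, 210, 465, 350, 150,
              390, 280, 170, 520, 330, 200, 450, 300, 160],
})

model = smf.mixedlm("fd_ms ~ aoi", df, groups=df["participant"])
print(model.fit().summary())
</code></pre>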
	]]></content:encoded>

	<dc:title>Eyes on Prevention: An Eye-Tracking Analysis of Visual Attention Patterns in Breast Cancer Screening Ads</dc:title>
			<dc:creator>Stefanos Balaskas</dc:creator>
			<dc:creator>Ioanna Yfantidou</dc:creator>
			<dc:creator>Dimitra Skandali</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060075</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-13</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-13</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>75</prism:startingPage>
		<prism:doi>10.3390/jemr18060075</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/75</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/74">

	<title>JEMR, Vol. 18, Pages 74: Where Vision Meets Memory: An Eye-Tracking Study of In-App Ads in Mobile Sports Games with Mixed Visual-Quantitative Analytics</title>
	<link>https://www.mdpi.com/1995-8692/18/6/74</link>
	<description>Mobile games have become one of the fastest-growing segments of the digital economy, and in-app advertisements represent a major source of revenue while shaping consumer attention and memory processes. This study examined the relationship between visual attention and brand recall of in-app advertisements in a mobile sports game using mobile eye-tracking technology. A total of 79 participants (47 male, 32 female; mean age = 25.8 years) actively played a mobile sports game for ten minutes while their eye movements were recorded with Tobii Pro Glasses 2. Areas of interest (AOIs) were defined for embedded advertisements, and fixation-related measures were analyzed. Brand recall was assessed through unaided, verbal-aided, and visual-aided measures, followed by demographic comparisons based on gender, mobile sports game experience, and interest in tennis. Results from Generalized Linear Mixed Models (GLMMs) revealed that brand placement was the strongest predictor of recall (p &lt; 0.001), overriding raw fixation duration. Specifically, brands integrated into task-relevant zones (e.g., the central net area) achieved significantly higher recall odds compared to peripheral ads, regardless of marginal variations in dwell time. While eye movement metrics varied by gender and interest, the multivariate model confirmed that in active gameplay, task integration drives memory encoding more effectively than passive visual salience. These findings suggest that active gameplay imposes unique cognitive demands, altering how attention and memory interact. The study contributes both theoretically, by extending advertising research into ecologically valid gaming contexts, and practically, by informing strategies for optimizing mobile in-app advertising.</description>
	<pubDate>2025-12-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 74: Where Vision Meets Memory: An Eye-Tracking Study of In-App Ads in Mobile Sports Games with Mixed Visual-Quantitative Analytics</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/74">doi: 10.3390/jemr18060074</a></p>
	<p>Authors:
		Ümit Büyükakgül
		Arif Yüce
		Hakan Katırcı
		</p>
	<p>Mobile games have become one of the fastest-growing segments of the digital economy, and in-app advertisements represent a major source of revenue while shaping consumer attention and memory processes. This study examined the relationship between visual attention and brand recall of in-app advertisements in a mobile sports game using mobile eye-tracking technology. A total of 79 participants (47 male, 32 female; mean age = 25.8 years) actively played a mobile sports game for ten minutes while their eye movements were recorded with Tobii Pro Glasses 2. Areas of interest (AOIs) were defined for embedded advertisements, and fixation-related measures were analyzed. Brand recall was assessed through unaided, verbal-aided, and visual-aided measures, followed by demographic comparisons based on gender, mobile sports game experience, and interest in tennis. Results from Generalized Linear Mixed Models (GLMMs) revealed that brand placement was the strongest predictor of recall (p &lt; 0.001), overriding raw fixation duration. Specifically, brands integrated into task-relevant zones (e.g., the central net area) achieved significantly higher recall odds compared to peripheral ads, regardless of marginal variations in dwell time. While eye movement metrics varied by gender and interest, the multivariate model confirmed that in active gameplay, task integration drives memory encoding more effectively than passive visual salience. These findings suggest that active gameplay imposes unique cognitive demands, altering how attention and memory interact. The study contributes both theoretically, by extending advertising research into ecologically valid gaming contexts, and practically, by informing strategies for optimizing mobile in-app advertising.</p>
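	<p>The placement-versus-dwell comparison can be sketched as follows (illustrative Python only, not the authors' analysis; names and data are invented); a GEE logistic model is used here as a repeated-measures alternative to the GLMMs reported above:</p>
	<pre><code># Hypothetical sketch: binary recall modeled from placement and dwell time,
# with correlated observations within each participant.
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf

df = pd.DataFrame({
    "participant": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6],
    "placement": ["central", "peripheral"] * 6,
    "dwell_ms": [900, 400, 1100, 350, 800, 500, 950, 300, 1000, 450, 700, 420],
    "recalled": [1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0],
})

model = smf.gee("recalled ~ placement + dwell_ms", groups="participant",
                data=df, family=sm.families.Binomial(),
                cov_struct=sm.cov_struct.Exchangeable())
print(model.fit().summary())
</code></pre>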
	]]></content:encoded>

	<dc:title>Where Vision Meets Memory: An Eye-Tracking Study of In-App Ads in Mobile Sports Games with Mixed Visual-Quantitative Analytics</dc:title>
			<dc:creator>Ümit Büyükakgül</dc:creator>
			<dc:creator>Arif Yüce</dc:creator>
			<dc:creator>Hakan Katırcı</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060074</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-10</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-10</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>74</prism:startingPage>
		<prism:doi>10.3390/jemr18060074</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/74</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/73">

	<title>JEMR, Vol. 18, Pages 73: Investigating the Effect of Presentation Mode on Cognitive Load in English–Chinese Distance Simultaneous Interpreting: An Eye-Tracking Study</title>
	<link>https://www.mdpi.com/1995-8692/18/6/73</link>
	<description>Distance simultaneous interpreting (DSI) is a typical example of technology-mediated interpreting, bridging participants (i.e., interpreters, audience, and speakers) in various events and conferences. This study explores how presentation mode affects cognitive load in DSI, utilizing eye-tracking sensor technology. A controlled experiment was conducted involving 36 participants, comprising 19 professional interpreters and 17 student interpreters, to assess the effects of presentation mode on their cognitive load during English-to-Chinese DSI. A Tobii Pro X3-120 screen-based eye tracker was used to collect eye-tracking data as the participants sequentially performed a DSI task involving four distinct presentation modes: the Speaker, Slides, Split, and Corner modes. The findings, derived from the integration of eye-tracking data and interpreting performance scores, indicate that both presentation mode and experience level significantly influence interpreters’ cognitive load. Notably, student interpreters demonstrated longer fixation durations in the Slides mode, indicating a reliance on visual aids for DSI. These results have implications for language learning, suggesting that the integration of visual supports can aid in the acquisition and performance of interpreting skills, particularly for less experienced interpreters. This study contributes to our understanding of the interplay between technology, cognitive load, and language learning in the context of DSI.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 73: Investigating the Effect of Presentation Mode on Cognitive Load in English–Chinese Distance Simultaneous Interpreting: An Eye-Tracking Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/73">doi: 10.3390/jemr18060073</a></p>
	<p>Authors:
		Xuelian (Rachel) Zhu
		</p>
	<p>Distance simultaneous interpreting (DSI) is a typical example of technology-mediated interpreting, bridging participants (i.e., interpreters, audience, and speakers) in various events and conferences. This study explores how presentation mode affects cognitive load in DSI, utilizing eye-tracking sensor technology. A controlled experiment was conducted involving 36 participants, comprising 19 professional interpreters and 17 student interpreters, to assess the effects of presentation mode on their cognitive load during English-to-Chinese DSI. A Tobii Pro X3-120 screen-based eye tracker was used to collect eye-tracking data as the participants sequentially performed a DSI task involving four distinct presentation modes: the Speaker, Slides, Split, and Corner modes. The findings, derived from the integration of eye-tracking data and interpreting performance scores, indicate that both presentation mode and experience level significantly influence interpreters’ cognitive load. Notably, student interpreters demonstrated longer fixation durations in the Slides mode, indicating a reliance on visual aids for DSI. These results have implications for language learning, suggesting that the integration of visual supports can aid in the acquisition and performance of interpreting skills, particularly for less experienced interpreters. This study contributes to our understanding of the interplay between technology, cognitive load, and language learning in the context of DSI.</p>
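	<p>The central comparison can be pictured with a small illustrative table (a Python sketch with invented numbers, not the study's data): mean fixation duration by experience level and presentation mode.</p>
	<pre><code># Hypothetical sketch: the group-by-mode summary at the heart of the design.
import pandas as pd

df = pd.DataFrame({
    "group": ["professional"] * 4 + ["student"] * 4,
    "mode": ["Speaker", "Slides", "Split", "Corner"] * 2,
    "mean_fd_ms": [310, 335, 320, 315, 330, 420, 360, 340],
})

print(df.pivot(index="group", columns="mode", values="mean_fd_ms"))
# The reported pattern would show up as an elevated Slides value in the
# student row, read as heavier reliance on visual support.
</code></pre>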
	]]></content:encoded>

	<dc:title>Investigating the Effect of Presentation Mode on Cognitive Load in English&amp;amp;ndash;Chinese Distance Simultaneous Interpreting: An Eye-Tracking Study</dc:title>
			<dc:creator>Xuelian (Rachel) Zhu</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060073</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>73</prism:startingPage>
		<prism:doi>10.3390/jemr18060073</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/73</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/72">

	<title>JEMR, Vol. 18, Pages 72: Initial and Sustained Attentional Bias Toward Emotional Faces in Patients with Major Depressive Disorder</title>
	<link>https://www.mdpi.com/1995-8692/18/6/72</link>
	<description>Major depressive disorder (MDD) represents a prevalent mental health condition characterized by prominent attentional biases, particularly toward negative stimuli. While extensive research has established the significance of negative attentional bias in depression, critical gaps remain in understanding the temporal dynamics and valence-specificity of these biases. This study employed eye-tracking technology to systematically examine the attentional processing of emotional faces (happy, fearful, sad) in MDD patients (n = 61) versus healthy controls (HC, n = 47), assessing both the initial orientation (initial gaze preference) and sustained attention (first dwell time). Key findings revealed the following: (1) while both groups showed an initial vigilance toward threatening faces (fearful/sad), only MDD patients displayed an additional attentional capture by happy faces; (2) a significant emotion main effect (F(2, 216) = 10.19, p &lt; 0.001) indicated a stronger initial orientation to fearful versus happy faces, with Bayesian analyses (BF &lt; 0.3) confirming the absence of group differences; and (3) no group disparities emerged in sustained attentional maintenance (all ps &gt; 0.05). These results challenge conventional negativity-focused models by demonstrating valence-specific early-stage abnormalities in MDD, suggesting that depressive attentional dysfunction may be most pronounced during initial automatic processing rather than later strategic stages. The findings advance the theoretical understanding of attentional bias in depression while highlighting the need for stage-specific intervention approaches.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 72: Initial and Sustained Attentional Bias Toward Emotional Faces in Patients with Major Depressive Disorder</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/72">doi: 10.3390/jemr18060072</a></p>
	<p>Authors:
		Hanliang Wei
		Tak Lam
		Weijian Liu
		Waxun Su
		Zheng Wang
		Qiandong Wang
		Xiao Lin
		Peng Li
		</p>
	<p>Major depressive disorder (MDD) represents a prevalent mental health condition characterized by prominent attentional biases, particularly toward negative stimuli. While extensive research has established the significance of negative attentional bias in depression, critical gaps remain in understanding the temporal dynamics and valence-specificity of these biases. This study employed eye-tracking technology to systematically examine the attentional processing of emotional faces (happy, fearful, sad) in MDD patients (n = 61) versus healthy controls (HC, n = 47), assessing both the initial orientation (initial gaze preference) and sustained attention (first dwell time). Key findings revealed the following: (1) while both groups showed an initial vigilance toward threatening faces (fearful/sad), only MDD patients displayed an additional attentional capture by happy faces; (2) a significant emotion main effect (F(2, 216) = 10.19, p &lt; 0.001) indicated a stronger initial orientation to fearful versus happy faces, with Bayesian analyses (BF &lt; 0.3) confirming the absence of group differences; and (3) no group disparities emerged in sustained attentional maintenance (all ps &gt; 0.05). These results challenge conventional negativity-focused models by demonstrating valence-specific early-stage abnormalities in MDD, suggesting that depressive attentional dysfunction may be most pronounced during initial automatic processing rather than later strategic stages. The findings advance the theoretical understanding of attentional bias in depression while highlighting the need for stage-specific intervention approaches.</p>
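	<p>As a concrete reading of the "initial orientation" measure, the following illustrative Python sketch (invented column names and data, not the study's pipeline) computes, per participant and emotion, the proportion of trials whose first fixation lands on the emotional face:</p>
	<pre><code># Hypothetical sketch: initial gaze preference as a first-fixation proportion.
import pandas as pd

trials = pd.DataFrame({
    "participant": [1, 1, 1, 1, 2, 2, 2, 2],
    "emotion": ["fearful", "happy", "fearful", "happy"] * 2,
    "first_fix_on_emotional": [1, 0, 1, 1, 1, 1, 0, 0],
})

pref = (trials.groupby(["participant", "emotion"])["first_fix_on_emotional"]
        .mean().rename("initial_orientation"))
print(pref)
# Values above 0.5 indicate an initial bias toward the emotional face; such
# scores are then compared across emotions and between groups.
</code></pre>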
	]]></content:encoded>

	<dc:title>Initial and Sustained Attentional Bias Toward Emotional Faces in Patients with Major Depressive Disorder</dc:title>
			<dc:creator>Hanliang Wei</dc:creator>
			<dc:creator>Tak Lam</dc:creator>
			<dc:creator>Weijian Liu</dc:creator>
			<dc:creator>Waxun Su</dc:creator>
			<dc:creator>Zheng Wang</dc:creator>
			<dc:creator>Qiandong Wang</dc:creator>
			<dc:creator>Xiao Lin</dc:creator>
			<dc:creator>Peng Li</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060072</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>72</prism:startingPage>
		<prism:doi>10.3390/jemr18060072</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/72</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/71">

	<title>JEMR, Vol. 18, Pages 71: Robust Camera-Based Eye-Tracking Method Allowing Head Movements and Its Application in User Experience Research</title>
	<link>https://www.mdpi.com/1995-8692/18/6/71</link>
	<description>Eye-tracking for user experience analysis has traditionally relied on dedicated hardware, which is often costly and imposes restrictive operating conditions. As an alternative, solutions utilizing ordinary webcams have attracted significant interest due to their affordability and ease of use. However, a major limitation persists in these vision-based methods: sensitivity to head movements. Therefore, users are often required to maintain a rigid head position, leading to discomfort and potentially skewed results. To address this challenge, this paper proposes a robust eye-tracking methodology designed to accommodate head motion. Our core technique involves mapping the displacement of the pupil center from a dynamically updated reference point to estimate the gaze point. When head movement is detected, the system recalculates the head-pointing coordinate using estimated head pose and user-to-screen distance. This new head position and the corresponding pupil center are then established as the fresh benchmark for subsequent gaze point estimation, creating a continuous and adaptive correction loop. We conducted accuracy tests with 22 participants. The results demonstrate that our method surpasses the performance of many current methods, achieving mean gaze errors of 1.13 and 1.37 degrees in two testing modes. Further validation in a smooth pursuit task confirmed its efficacy in dynamic scenarios. Finally, we applied the method in a real-world gaming context, successfully extracting fixation counts and gaze heatmaps to analyze visual behavior and UX across different game modes, thereby verifying its practical utility.</description>
	<pubDate>2025-12-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 71: Robust Camera-Based Eye-Tracking Method Allowing Head Movements and Its Application in User Experience Research</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/71">doi: 10.3390/jemr18060071</a></p>
	<p>Authors:
		He Zhang
		Lu Yin
		</p>
	<p>Eye-tracking for user experience analysis has traditionally relied on dedicated hardware, which is often costly and imposes restrictive operating conditions. As an alternative, solutions utilizing ordinary webcams have attracted significant interest due to their affordability and ease of use. However, a major limitation persists in these vision-based methods: sensitivity to head movements. Therefore, users are often required to maintain a rigid head position, leading to discomfort and potentially skewed results. To address this challenge, this paper proposes a robust eye-tracking methodology designed to accommodate head motion. Our core technique involves mapping the displacement of the pupil center from a dynamically updated reference point to estimate the gaze point. When head movement is detected, the system recalculates the head-pointing coordinate using estimated head pose and user-to-screen distance. This new head position and the corresponding pupil center are then established as the fresh benchmark for subsequent gaze point estimation, creating a continuous and adaptive correction loop. We conducted accuracy tests with 22 participants. The results demonstrate that our method surpasses the performance of many current methods, achieving mean gaze errors of 1.13 and 1.37 degrees in two testing modes. Further validation in a smooth pursuit task confirmed its efficacy in dynamic scenarios. Finally, we applied the method in a real-world gaming context, successfully extracting fixation counts and gaze heatmaps to analyze visual behavior and UX across different game modes, thereby verifying its practical utility.</p>
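	<p>The re-anchoring idea can be sketched in a few lines (illustrative Python only, not the authors' implementation; the gain, pixel density, and viewing distance are invented calibration values):</p>
	<pre><code># Hypothetical sketch: gaze from pupil displacement, plus error in degrees.
import math

GAIN_PX = 28.0  # assumed screen pixels of gaze shift per pixel of pupil shift

def estimate_gaze(ref_pupil, ref_gaze, pupil):
    # Pupil-center displacement relative to the current reference point.
    dx = pupil[0] - ref_pupil[0]
    dy = pupil[1] - ref_pupil[1]
    return (ref_gaze[0] + GAIN_PX * dx, ref_gaze[1] + GAIN_PX * dy)

def angular_error_deg(gaze_px, target_px, px_per_cm=38.0, distance_cm=60.0):
    # Convert on-screen distance to visual angle at the viewing distance.
    d_px = math.hypot(gaze_px[0] - target_px[0], gaze_px[1] - target_px[1])
    return math.degrees(math.atan2(d_px / px_per_cm, distance_cm))

gaze = estimate_gaze(ref_pupil=(320.0, 240.0), ref_gaze=(960.0, 540.0),
                     pupil=(324.5, 238.0))
print(gaze, angular_error_deg(gaze, (1080.0, 500.0)))
# On detected head movement, the method re-anchors: the head-pointing
# coordinate becomes the new ref_gaze and the current pupil center the new
# ref_pupil, so later estimates use the updated benchmark.
</code></pre>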
	]]></content:encoded>

	<dc:title>Robust Camera-Based Eye-Tracking Method Allowing Head Movements and Its Application in User Experience Research</dc:title>
			<dc:creator>He Zhang</dc:creator>
			<dc:creator>Lu Yin</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060071</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-12-01</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-12-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>71</prism:startingPage>
		<prism:doi>10.3390/jemr18060071</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/71</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/70">

	<title>JEMR, Vol. 18, Pages 70: Measuring Mental Effort in Real Time Using Pupillometry</title>
	<link>https://www.mdpi.com/1995-8692/18/6/70</link>
	<description>Mental effort, a critical factor influencing task performance, is often difficult to measure accurately and efficiently. Pupil diameter has emerged as a reliable, real-time indicator of mental effort. This study introduces RIPA2, an enhanced pupillometric index for real-time mental effort assessment. Building on the original RIPA method, RIPA2 incorporates refined Savitzky–Golay filter parameters to better isolate pupil diameter fluctuations within biologically relevant frequency bands linked to cognitive load. We validated RIPA2 across two distinct tasks: a structured N-back memory task and a naturalistic information search task involving fact-checking and decision-making scenarios. Our findings show that RIPA2 reliably tracks variations in mental effort, demonstrating improved sensitivity and consistency over the original RIPA and strong alignment with established offline pupil-based cognitive load indices such as LHIPA. Notably, RIPA2 captured increased mental effort at higher N-back levels and successfully distinguished greater effort during decision-making tasks compared to fact-checking tasks, highlighting its applicability to real-world cognitive demands. These findings suggest that RIPA2 provides a robust, continuous, and low-latency method for assessing mental effort. It holds strong potential for broader use in educational settings, medical environments, workplaces, and adaptive user interfaces, facilitating objective monitoring of mental effort beyond laboratory conditions.</description>
	<pubDate>2025-11-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 70: Measuring Mental Effort in Real Time Using Pupillometry</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/70">doi: 10.3390/jemr18060070</a></p>
	<p>Authors:
		Gavindya Jayawardena
		Yasith Jayawardana
		Jacek Gwizdka
		</p>
	<p>Mental effort, a critical factor influencing task performance, is often difficult to measure accurately and efficiently. Pupil diameter has emerged as a reliable, real-time indicator of mental effort. This study introduces RIPA2, an enhanced pupillometric index for real-time mental effort assessment. Building on the original RIPA method, RIPA2 incorporates refined Savitzky–Golay filter parameters to better isolate pupil diameter fluctuations within biologically relevant frequency bands linked to cognitive load. We validated RIPA2 across two distinct tasks: a structured N-back memory task and a naturalistic information search task involving fact-checking and decision-making scenarios. Our findings show that RIPA2 reliably tracks variations in mental effort, demonstrating improved sensitivity and consistency over the original RIPA and strong alignment with established offline pupil-based cognitive load indices such as LHIPA. Notably, RIPA2 captured increased mental effort at higher N-back levels and successfully distinguished greater effort during decision-making tasks compared to fact-checking tasks, highlighting its applicability to real-world cognitive demands. These findings suggest that RIPA2 provides a robust, continuous, and low-latency method for assessing mental effort. It holds strong potential for broader use in educational settings, medical environments, workplaces, and adaptive user interfaces, facilitating objective monitoring of mental effort beyond laboratory conditions.</p>
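	<p>The filtering step can be illustrated with scipy (a minimal sketch; the sampling rate, window length, and polynomial order below are invented stand-ins, not the published RIPA2 parameters):</p>
	<pre><code># Hypothetical sketch: separate the slow pupil trend from faster fluctuations
# with a Savitzky-Golay filter.
import numpy as np
from scipy.signal import savgol_filter

fs = 60.0                                    # assumed sampling rate, Hz
t = np.arange(0, 10, 1 / fs)
pupil = 3.5 + 0.2 * np.sin(2 * np.pi * 0.08 * t)   # slow drift
pupil += 0.05 * np.sin(2 * np.pi * 1.2 * t)        # faster component
pupil += 0.01 * np.random.default_rng(0).standard_normal(t.size)

# Window length and polynomial order determine which frequencies survive;
# per the abstract, RIPA2's refinement lies in how these are chosen.
trend = savgol_filter(pupil, window_length=121, polyorder=2)
fluctuations = pupil - trend
print(f"RMS of band-limited fluctuations: {np.sqrt(np.mean(fluctuations ** 2)):.4f}")
</code></pre>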
	]]></content:encoded>

	<dc:title>Measuring Mental Effort in Real Time Using Pupillometry</dc:title>
			<dc:creator>Gavindya Jayawardena</dc:creator>
			<dc:creator>Yasith Jayawardana</dc:creator>
			<dc:creator>Jacek Gwizdka</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060070</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-24</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-24</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>70</prism:startingPage>
		<prism:doi>10.3390/jemr18060070</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/70</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/69">

	<title>JEMR, Vol. 18, Pages 69: Visual Attention to Food Content on Social Media: An Eye-Tracking Study Among Young Adults</title>
	<link>https://www.mdpi.com/1995-8692/18/6/69</link>
	<description>Social media has become a dominant channel for food marketing, particularly targeting youth through visually engaging and socially embedded content. This study investigates how young adults visually engage with food advertisements on social media and how specific visual and contextual features influence purchase intention. Using eye-tracking technology and survey analysis, data were collected from 35 participants aged 18 to 25. Participants viewed simulated Instagram posts incorporating elements such as food imagery, branding, influencer presence, and social cues. Visual attention was recorded using Tobii Pro Spectrum, and behavioral responses were assessed via post-surveys. A 2 × 2 design varying influencer presence and food type showed that both features significantly increased visual attention. Marketing cues and branding also attracted substantial visual attention. Linear regression revealed that core/non-core content and influencer features were among the strongest predictors of consumer response. The findings underscore the persuasive power of human and social features in digital food advertising. These insights have implications for commercial marketing practices and for understanding how visual and social elements influence youth engagement with food content on digital platforms.</description>
	<pubDate>2025-11-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 69: Visual Attention to Food Content on Social Media: An Eye-Tracking Study Among Young Adults</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/69">doi: 10.3390/jemr18060069</a></p>
	<p>Authors:
		Aura Riswanto
		Seieun Kim
		Youngsam Ha
		Hak-Seon Kim
		</p>
	<p>Social media has become a dominant channel for food marketing, particularly targeting youth through visually engaging and socially embedded content. This study investigates how young adults visually engage with food advertisements on social media and how specific visual and contextual features influence purchase intention. Using eye-tracking technology and survey analysis, data were collected from 35 participants aged 18 to 25. Participants viewed simulated Instagram posts incorporating elements such as food imagery, branding, influencer presence, and social cues. Visual attention was recorded using Tobii Pro Spectrum, and behavioral responses were assessed via post-surveys. A 2 × 2 design varying influencer presence and food type showed that both features significantly increased visual attention. Marketing cues and branding also attracted substantial visual attention. Linear regression revealed that core/non-core content and influencer features were among the strongest predictors of consumer response. The findings underscore the persuasive power of human and social features in digital food advertising. These insights have implications for commercial marketing practices and for understanding how visual and social elements influence youth engagement with food content on digital platforms.</p>
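	<p>The regression reported above can be sketched as follows (illustrative Python with invented predictors and outcomes, not the study's data or exact model):</p>
	<pre><code># Hypothetical sketch: consumer response regressed on content and influencer
# features plus a gaze measure.
import pandas as pd
import statsmodels.formula.api as smf

df = pd.DataFrame({
    "purchase_intent": [4.1, 3.2, 4.6, 2.8, 3.9, 4.4, 2.5, 3.6, 4.8, 3.0],
    "core_food": [1, 0, 1, 0, 1, 1, 0, 1, 1, 0],
    "influencer": [1, 0, 0, 1, 0, 1, 0, 1, 1, 0],
    "dwell_s": [3.2, 1.8, 3.9, 1.2, 2.6, 3.5, 1.0, 2.9, 4.1, 1.5],
})

model = smf.ols("purchase_intent ~ core_food + influencer + dwell_s", data=df)
print(model.fit().summary())
</code></pre>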
	]]></content:encoded>

	<dc:title>Visual Attention to Food Content on Social Media: An Eye-Tracking Study Among Young Adults</dc:title>
			<dc:creator>Aura Riswanto</dc:creator>
			<dc:creator>Seieun Kim</dc:creator>
			<dc:creator>Youngsam Ha</dc:creator>
			<dc:creator>Hak-Seon Kim</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060069</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-20</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-20</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>69</prism:startingPage>
		<prism:doi>10.3390/jemr18060069</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/69</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/68">

	<title>JEMR, Vol. 18, Pages 68: Gaze Characteristics Using a Three-Dimensional Heads-Up Display During Cataract Surgery</title>
	<link>https://www.mdpi.com/1995-8692/18/6/68</link>
	<description>Purpose: An observational study was conducted to investigate differences in gaze behavior across expertise levels using a 3D heads-up display (HUD) integrated with eye tracking. Methods: 25 ophthalmologists (PGY2–4, fellows, and attendings; n = 5 per group) performed cataract surgery on a SimulEYE model using the NGENUITY HUD. Results: Surgical proficiency increased with experience, with attendings achieving the highest scores (54.4 ± 0.89). Compared with attendings, PGY2s had longer fixation durations (p = 0.042), longer saccades (p &lt; 0.0001), and fewer fixations on the HUD (p &lt; 0.0001). Capsulorhexis diameter relative to capsule size increased with expertise, with fellows and attendings achieving significantly larger diameters than PGY2s (p &lt; 0.0001). Experts maintained smaller tear angles, initiated tears closer to the main wound, and produced more circular morphologies. They rapidly alternated gaze between instruments and surrounding tissue, whereas novices (PGY2–4) fixated primarily on the instrument tip. Conclusions: Experts employ a feed-forward visual sampling strategy, allowing simultaneous perception of instruments and surrounding tissue and minimizing inadvertent damage. Furthermore, attending surgeons maintain smaller tear angles and initiate tears proximally to forceps insertion, which may contribute to more controlled tears. Future integration of eye-tracking technology into surgical training could enhance visual-motor strategies in novices.</description>
	<pubDate>2025-11-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 68: Gaze Characteristics Using a Three-Dimensional Heads-Up Display During Cataract Surgery</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/68">doi: 10.3390/jemr18060068</a></p>
	<p>Authors:
		Puranjay Gupta
		Emily Kao
		Neil Sheth
		Reem Alahmadi
		Michael J. Heiferman
		</p>
	<p>Purpose: An observational study was conducted to investigate differences in gaze behavior across expertise levels using a 3D heads-up display (HUD) integrated with eye tracking. Methods: 25 ophthalmologists (PGY2–4, fellows, and attendings; n = 5 per group) performed cataract surgery on a SimulEYE model using the NGENUITY HUD. Results: Surgical proficiency increased with experience, with attendings achieving the highest scores (54.4 ± 0.89). Compared with attendings, PGY2s had longer fixation durations (p = 0.042), longer saccades (p &lt; 0.0001), and fewer fixations on the HUD (p &lt; 0.0001). Capsulorhexis diameter relative to capsule size increased with expertise, with fellows and attendings achieving significantly larger diameters than PGY2s (p &lt; 0.0001). Experts maintained smaller tear angles, initiated tears closer to the main wound, and produced more circular morphologies. They rapidly alternated gaze between instruments and surrounding tissue, whereas novices (PGY2–4) fixated primarily on the instrument tip. Conclusions: Experts employ a feed-forward visual sampling strategy, allowing simultaneous perception of instruments and surrounding tissue and minimizing inadvertent damage. Furthermore, attending surgeons maintain smaller tear angles and initiate tears proximally to forceps insertion, which may contribute to more controlled tears. Future integration of eye-tracking technology into surgical training could enhance visual-motor strategies in novices.</p>
	]]></content:encoded>

	<dc:title>Gaze Characteristics Using a Three-Dimensional Heads-Up Display During Cataract Surgery</dc:title>
			<dc:creator>Puranjay Gupta</dc:creator>
			<dc:creator>Emily Kao</dc:creator>
			<dc:creator>Neil Sheth</dc:creator>
			<dc:creator>Reem Alahmadi</dc:creator>
			<dc:creator>Michael J. Heiferman</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060068</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-17</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-17</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>68</prism:startingPage>
		<prism:doi>10.3390/jemr18060068</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/68</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/67">

	<title>JEMR, Vol. 18, Pages 67: BEACH-Gaze: Supporting Descriptive and Predictive Gaze Analytics in the Era of Artificial Intelligence and Advanced Data Science</title>
	<link>https://www.mdpi.com/1995-8692/18/6/67</link>
	<description>Recent breakthroughs in machine learning, artificial intelligence, and the emergence of large datasets have made the integration of eye tracking increasingly feasible not only in computing but also in many other disciplines, accelerating innovation and scientific discovery. These transformative changes often depend on intelligently analyzing and interpreting gaze data, which demands a substantial technical background. Overcoming these technical barriers has remained an obstacle to the broader adoption of eye tracking technologies in certain communities. In an effort to increase accessibility and empower a broader community of researchers and practitioners to leverage eye tracking, this paper presents an open-source software platform: Beach Environment for the Analytics of Human Gaze (BEACH-Gaze), designed to offer comprehensive descriptive and predictive analytical support. Firstly, BEACH-Gaze provides sequential gaze analytics through window segmentation in its data processing and analysis pipeline, which can be used to simulate real-time gaze-based systems. Secondly, it integrates a range of established machine learning models, allowing researchers from diverse disciplines to generate gaze-enabled predictions without advanced technical expertise. The overall goal is to abstract away technical details, to aid the broader community interested in eye tracking research and applications with data interpretation, and to leverage knowledge gained from eye gaze in the development of machine intelligence. We further demonstrate three use cases that apply descriptive and predictive gaze analytics to support individuals with autism spectrum disorder during technology-assisted exercises, to dynamically tailor visual cues for an individual user via physiologically adaptive visualizations, and to predict pilots’ performance in flight maneuvers to enhance aviation safety.</description>
	<pubDate>2025-11-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 67: BEACH-Gaze: Supporting Descriptive and Predictive Gaze Analytics in the Era of Artificial Intelligence and Advanced Data Science</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/67">doi: 10.3390/jemr18060067</a></p>
	<p>Authors:
		Bo Fu
		Kayla Chu
		Angelo Soriano
		Peter Gatsby
		Nicolas Guardado
		Ashley Jones
		Matthew Halderman
		</p>
	<p>Recent breakthroughs in machine learning, artificial intelligence, and the emergence of large datasets have made the integration of eye tracking increasingly feasible not only in computing but also in many other disciplines, accelerating innovation and scientific discovery. These transformative changes often depend on intelligently analyzing and interpreting gaze data, which demands a substantial technical background. Overcoming these technical barriers has remained an obstacle to the broader adoption of eye tracking technologies in certain communities. In an effort to increase accessibility and empower a broader community of researchers and practitioners to leverage eye tracking, this paper presents an open-source software platform: Beach Environment for the Analytics of Human Gaze (BEACH-Gaze), designed to offer comprehensive descriptive and predictive analytical support. Firstly, BEACH-Gaze provides sequential gaze analytics through window segmentation in its data processing and analysis pipeline, which can be used to simulate real-time gaze-based systems. Secondly, it integrates a range of established machine learning models, allowing researchers from diverse disciplines to generate gaze-enabled predictions without advanced technical expertise. The overall goal is to abstract away technical details, to aid the broader community interested in eye tracking research and applications with data interpretation, and to leverage knowledge gained from eye gaze in the development of machine intelligence. We further demonstrate three use cases that apply descriptive and predictive gaze analytics to support individuals with autism spectrum disorder during technology-assisted exercises, to dynamically tailor visual cues for an individual user via physiologically adaptive visualizations, and to predict pilots’ performance in flight maneuvers to enhance aviation safety.</p>
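	<p>To give a feel for the window-segmentation idea, here is a small illustrative sketch in Python (not BEACH-Gaze code; the fixation record and window size are invented) that emits per-window descriptive features of the kind a downstream model could consume:</p>
	<pre><code># Hypothetical sketch: fixed-size time windows over a fixation record.
import numpy as np

# Each fixation: (onset_ms, duration_ms); invented example data.
fixations = np.array([(0, 180), (220, 240), (510, 300), (900, 150),
                      (1200, 400), (1700, 220), (2100, 260)], dtype=float)

def window_features(fix, window_ms=1000.0, total_ms=3000.0):
    onset, dur = fix[:, 0], fix[:, 1]
    rows = []
    for start in np.arange(0.0, total_ms, window_ms):
        end = start + window_ms
        # Count fixations fully contained in the current window.
        in_win = np.logical_and(onset >= start, end >= onset + dur)
        n = int(in_win.sum())
        mean_dur = float(dur[in_win].mean()) if n else 0.0
        rows.append({"window_start_ms": start, "fix_count": n,
                     "mean_fix_dur_ms": round(mean_dur, 1)})
    return rows

for row in window_features(fixations):
    print(row)
</code></pre>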
	]]></content:encoded>

	<dc:title>BEACH-Gaze: Supporting Descriptive and Predictive Gaze Analytics in the Era of Artificial Intelligence and Advanced Data Science</dc:title>
			<dc:creator>Bo Fu</dc:creator>
			<dc:creator>Kayla Chu</dc:creator>
			<dc:creator>Angelo Soriano</dc:creator>
			<dc:creator>Peter Gatsby</dc:creator>
			<dc:creator>Nicolas Guardado</dc:creator>
			<dc:creator>Ashley Jones</dc:creator>
			<dc:creator>Matthew Halderman</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060067</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-12</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>67</prism:startingPage>
		<prism:doi>10.3390/jemr18060067</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/67</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/66">

	<title>JEMR, Vol. 18, Pages 66: Recovery of the Pupillary Response After Light Adaptation Is Slowed in Patients with Age-Related Macular Degeneration</title>
	<link>https://www.mdpi.com/1995-8692/18/6/66</link>
	<description>Purpose: This study evaluates a novel, non-invasive method using a virtual reality (VR) headset with integrated eye trackers to assess retinal function by measuring the recovery of the pupillary response after light adaptation in patients with age-related macular degeneration (AMD). Methods: In this pilot study, 14 patients with clinically confirmed AMD and 14 age-matched healthy controls were exposed to alternating bright and dark stimuli using a VR headset. The dark stimulus duration increased incrementally by 100 milliseconds per trial, repeated over 50 cycles. The pupillary response to the re-onset of brightness was recorded. Data were analyzed using a linear mixed-effects model to compare recovery patterns between groups and a convolutional neural network to evaluate diagnostic accuracy. Results: The pupillary response amplitude increased with longer dark stimuli; i.e., the longer the eye was exposed to darkness, the larger the subsequent pupillary amplitude. This pupillary recovery was significantly slowed by age and by the presence of macular degeneration. Diagnostic accuracy for AMD was approximately 92%, with a sensitivity of 90% and a specificity of 70%. Conclusions: This proof-of-concept study demonstrates that consumer-grade VR headsets with integrated eye tracking can detect retinal dysfunction associated with AMD. The method offers a fast, accessible, and potentially scalable approach for retinal disease screening and monitoring. Further optimization and validation in larger cohorts are needed to confirm its clinical utility.</description>
	<pubDate>2025-11-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 66: Recovery of the Pupillary Response After Light Adaptation Is Slowed in Patients with Age-Related Macular Degeneration</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/66">doi: 10.3390/jemr18060066</a></p>
	<p>Authors:
		Javier Barranco Garcia
		Thomas Ferrazzini
		Ana Coito
		Dominik Brügger
		Mathias Abegg
		</p>
	<p>Purpose: This study evaluates a novel, non-invasive method using a virtual reality (VR) headset with integrated eye trackers to assess retinal function by measuring the recovery of the pupillary response after light adaptation in patients with age-related macular degeneration (AMD). Methods: In this pilot study, 14 patients with clinically confirmed AMD and 14 age-matched healthy controls were exposed to alternating bright and dark stimuli using a VR headset. The dark stimulus duration increased incrementally by 100 milliseconds per trial, repeated over 50 cycles. The pupillary response to the re-onset of brightness was recorded. Data were analyzed using a linear mixed-effects model to compare recovery patterns between groups and a convolutional neural network to evaluate diagnostic accuracy. Results: The pupillary response amplitude increased with longer dark stimuli; i.e., the longer the eye was exposed to darkness, the larger the subsequent pupillary amplitude. This pupillary recovery was significantly slowed by age and by the presence of macular degeneration. Diagnostic accuracy for AMD was approximately 92%, with a sensitivity of 90% and a specificity of 70%. Conclusions: This proof-of-concept study demonstrates that consumer-grade VR headsets with integrated eye tracking can detect retinal dysfunction associated with AMD. The method offers a fast, accessible, and potentially scalable approach for retinal disease screening and monitoring. Further optimization and validation in larger cohorts are needed to confirm its clinical utility.</p>
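	<p>The core relationship being modeled can be sketched numerically (illustrative Python with simulated amplitudes, not patient data; the time constants are invented):</p>
	<pre><code># Hypothetical sketch: pupil re-constriction amplitude vs. preceding dark
# duration, with a slower (invented) time constant standing in for AMD.
import numpy as np

dark_ms = np.arange(100, 5100, 100)          # 50 trials, +100 ms per trial
rng = np.random.default_rng(1)
amp_control = 0.9 * (1 - np.exp(-dark_ms / 1500)) + rng.normal(0, 0.03, 50)
amp_amd = 0.9 * (1 - np.exp(-dark_ms / 2600)) + rng.normal(0, 0.03, 50)

for label, amp in (("control", amp_control), ("AMD", amp_amd)):
    slope = np.polyfit(dark_ms, amp, 1)[0]
    print(f"{label}: amplitude gain {slope * 1000:.4f} per second of darkness")
# A shallower amplitude-vs.-duration profile mirrors the reported slowing of
# pupillary recovery.
</code></pre>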
	]]></content:encoded>

	<dc:title>Recovery of the Pupillary Response After Light Adaptation Is Slowed in Patients with Age-Related Macular Degeneration</dc:title>
			<dc:creator>Javier Barranco Garcia</dc:creator>
			<dc:creator>Thomas Ferrazzini</dc:creator>
			<dc:creator>Ana Coito</dc:creator>
			<dc:creator>Dominik Brügger</dc:creator>
			<dc:creator>Mathias Abegg</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060066</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-10</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-10</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>66</prism:startingPage>
		<prism:doi>10.3390/jemr18060066</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/66</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/65">

	<title>JEMR, Vol. 18, Pages 65: Eye-Tracking Data in the Exploration of Students’ Engagement with Representations in Mathematics: Areas of Interest (AOIs) as Methodological and Conceptual Challenges</title>
	<link>https://www.mdpi.com/1995-8692/18/6/65</link>
	<description>In mathematics, and in learning mathematics, representations (texts, formulae, and figures) play a vital role. Eye-tracking is a promising approach for studying how representations are attended to in the context of mathematics learning. The focus of the research reported here is on the methodological and conceptual challenges that arise when analysing students’ engagement with different kinds of representations using such data. The study critically examines some of these issues through a case study of three engineering students engaging with an instructional document introducing double integrals. This study reports that not only do the characteristics of different types of representations affect students’ engagement with areas of interest (AOIs), but methodological decisions, such as how AOIs are defined, are also consequential for interpretations of that engagement. This shows that both technical parameters and the inherent nature of the representations themselves must be considered when defining AOIs and analysing students’ engagement with representations. The findings offer practical considerations for designing and analysing eye-tracking studies when students’ engagement with different representations is in focus.</description>
	<pubDate>2025-11-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 65: Eye-Tracking Data in the Exploration of Students’ Engagement with Representations in Mathematics: Areas of Interest (AOIs) as Methodological and Conceptual Challenges</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/65">doi: 10.3390/jemr18060065</a></p>
	<p>Authors:
		Mahboubeh Nedaei
		Roger Säljö
		Shaista Kanwal
		Simon Goodchild
		</p>
	<p>In mathematics, and in learning mathematics, representations (texts, formulae, and figures) play a vital role. Eye-tracking is a promising approach for studying how representations are attended to in the context of mathematics learning. The focus of the research reported here is on the methodological and conceptual challenges that arise when analysing students’ engagement with different kinds of representations using such data. The study critically examines some of these issues through a case study of three engineering students engaging with an instructional document introducing double integrals. This study reports that not only do the characteristics of different types of representations affect students’ engagement with areas of interest (AOIs), but methodological decisions, such as how AOIs are defined, are also consequential for interpretations of that engagement. This shows that both technical parameters and the inherent nature of the representations themselves must be considered when defining AOIs and analysing students’ engagement with representations. The findings offer practical considerations for designing and analysing eye-tracking studies when students’ engagement with different representations is in focus.</p>
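	<p>The paper's methodological point, that AOI boundaries shape the measurement itself, can be demonstrated in a few lines (illustrative Python; the AOI rectangle, margin, and fixation coordinates are invented):</p>
	<pre><code># Hypothetical sketch: the same fixations scored against a tight and a
# padded AOI rectangle.
def in_rect(fx, fy, rect):
    x0, y0, x1, y1 = rect
    return (fx >= x0) and (x1 >= fx) and (fy >= y0) and (y1 >= fy)

def pad(rect, margin):
    x0, y0, x1, y1 = rect
    return (x0 - margin, y0 - margin, x1 + margin, y1 + margin)

formula_aoi = (400.0, 300.0, 700.0, 360.0)   # tight box around a formula
fixations = [(395.0, 310.0), (450.0, 330.0), (705.0, 355.0),
             (560.0, 366.0), (620.0, 340.0)]

for margin in (0.0, 15.0):
    rect = pad(formula_aoi, margin)
    hits = sum(in_rect(fx, fy, rect) for fx, fy in fixations)
    print(f"margin = {margin} px: {hits}/{len(fixations)} fixations inside")
# Three borderline fixations flip from "outside" to "inside" with a 15 px
# margin, changing any engagement measure computed from this AOI.
</code></pre>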
	]]></content:encoded>

	<dc:title>Eye-Tracking Data in the Exploration of Students’ Engagement with Representations in Mathematics: Areas of Interest (AOIs) as Methodological and Conceptual Challenges</dc:title>
			<dc:creator>Mahboubeh Nedaei</dc:creator>
			<dc:creator>Roger Säljö</dc:creator>
			<dc:creator>Shaista Kanwal</dc:creator>
			<dc:creator>Simon Goodchild</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060065</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>65</prism:startingPage>
		<prism:doi>10.3390/jemr18060065</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/65</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/64">

	<title>JEMR, Vol. 18, Pages 64: An Exploratory Eye-Tracking Study of Breast-Cancer Screening Ads: A Visual Analytics Framework and Descriptive Atlas</title>
	<link>https://www.mdpi.com/1995-8692/18/6/64</link>
	<description>Successful health promotion involves messages that are quickly captured and held long enough for eligibility, credibility, and calls to action to be encoded. This research develops an exploratory eye-tracking atlas of breast cancer screening ads viewed by midlife women and a replicable pipeline that distinguishes early capture from long-term processing. Areas of Interest are divided into design-influential categories and graphed with two complementary measures: first hit and time to first fixation for entry, and a tie-aware pairwise dominance model for dwell that produces rankings and an “early-vs.-sticky” quadrant visualization. Across creatives, pictorial and symbolic features were more likely to capture the first glance when they were perceptually dominant, while layouts containing centralized headlines or institutional cues deflected entry to the message and source. Prolonged attention was consistently focused on blocks of text, locations, and authorship badges over ornamental pictures, demarcating the functional difference between capture and processing. Subgroup differences indicated audience-sensitive shifts: older viewers and those from family households oriented earlier toward source cues, more educated audiences shifted toward copy and locations, and younger or single viewers shifted toward symbols and images. Internal diagnostics verified that pairwise matrices were consistent with standard dwell summaries, supporting the comparative approach. The atlas converts the patterns into design-ready heuristics: defend pieces that are both early and sticky, encourage sticky but late pieces by pushing them toward probable entry channels, de-clutter early but not sticky pieces to convert capture into processing, and re-think pieces that are neither. In practice, the diagnostics can be incorporated into procurement, pretesting, and briefs by agencies, educators, and campaign managers in order to enhance actionability without sacrificing audience segmentation. As an exploratory investigation, this study invites replication with larger and more diverse samples, extension to dynamic media, and associations with downstream measures such as recall and uptake of services.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 64: An Exploratory Eye-Tracking Study of Breast-Cancer Screening Ads: A Visual Analytics Framework and Descriptive Atlas</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/64">doi: 10.3390/jemr18060064</a></p>
	<p>Authors:
		Ioanna Yfantidou
		Stefanos Balaskas
		Dimitra Skandali
		</p>
	<p>Successful health promotion involves messages that are quickly captured and held long enough for eligibility, credibility, and calls to action to be encoded. This research develops an exploratory eye-tracking atlas of breast cancer screening ads viewed by midlife women and a replicable pipeline that distinguishes early capture from long-term processing. Areas of Interest are divided into design-influential categories and graphed with two complementary measures: first hit and time to first fixation for entry, and a tie-aware pairwise dominance model for dwell that produces rankings and an “early-vs.-sticky” quadrant visualization. Across creatives, pictorial and symbolic features were more likely to capture the first glance when they were perceptually dominant, while layouts containing centralized headlines or institutional cues deflected entry to the message and source. Prolonged attention was consistently focused on blocks of text, locations, and authorship badges over ornamental pictures, demarcating the functional difference between capture and processing. Subgroup differences indicated audience-sensitive shifts: older viewers and those from family households oriented earlier toward source cues, more educated audiences shifted toward copy and locations, and younger or single viewers shifted toward symbols and images. Internal diagnostics verified that pairwise matrices were consistent with standard dwell summaries, supporting the comparative approach. The atlas converts the patterns into design-ready heuristics: defend pieces that are both early and sticky, encourage sticky but late pieces by pushing them toward probable entry channels, de-clutter early but not sticky pieces to convert capture into processing, and re-think pieces that are neither. In practice, the diagnostics can be incorporated into procurement, pretesting, and briefs by agencies, educators, and campaign managers in order to enhance actionability without sacrificing audience segmentation. As an exploratory investigation, this study invites replication with larger and more diverse samples, extension to dynamic media, and associations with downstream measures such as recall and uptake of services.</p>
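	<p>The tie-aware pairwise dominance idea can be sketched as follows (an illustrative Python reading with invented dwell data; the authors' exact model may differ):</p>
	<pre><code># Hypothetical sketch: pairwise dominance tally for dwell, with ties split.
import numpy as np

aois = ["text", "image", "symbol", "source"]
dwell = np.array([            # seconds; one row per viewer, invented
    [2.4, 1.1, 0.6, 1.3],
    [1.9, 1.9, 0.4, 1.0],
    [2.8, 0.9, 0.9, 1.5],
])

n = len(aois)
dominance = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        if i != j:
            wins = (dwell[:, i] > dwell[:, j]).sum()
            ties = (dwell[:, i] == dwell[:, j]).sum()
            dominance[i, j] = wins + 0.5 * ties

scores = dominance.sum(axis=1)
for k in np.argsort(-scores):
    print(f"{aois[k]}: dominance score {scores[k]:.1f}")
</code></pre>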
	]]></content:encoded>

	<dc:title>An Exploratory Eye-Tracking Study of Breast-Cancer Screening Ads: A Visual Analytics Framework and Descriptive Atlas</dc:title>
			<dc:creator>Ioanna Yfantidou</dc:creator>
			<dc:creator>Stefanos Balaskas</dc:creator>
			<dc:creator>Dimitra Skandali</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060064</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>64</prism:startingPage>
		<prism:doi>10.3390/jemr18060064</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/64</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
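<!-- Illustrative sketch: the abstract above names a tie-aware pairwise dominance model over AOI dwell times and an "early-vs.-sticky" quadrant. The Python below is one minimal reading of that idea; the tie threshold, the median splits, the Copeland-style score, and all data are assumptions, not the authors' published code.

import numpy as np

def dominance_matrix(dwell, tie_eps=0.05):
    """dwell: (n_trials, n_aois) dwell times. Returns pairwise win shares.

    M[i, j] is the share of trials where AOI i out-dwells AOI j; ties
    (relative difference below tie_eps) award half a win to each side.
    """
    n = dwell.shape[1]
    M = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            d = dwell[:, i] - dwell[:, j]
            scale = np.maximum(np.abs(dwell[:, i]) + np.abs(dwell[:, j]), 1e-9)
            tie = np.abs(d) / scale < tie_eps
            M[i, j] = np.mean((d > 0) & ~tie) + 0.5 * np.mean(tie)
    return M

def quadrants(ttff, dwell_score):
    """Label AOIs early/late (median TTFF) by sticky/not (median dominance)."""
    early = ttff <= np.median(ttff)
    sticky = dwell_score >= np.median(dwell_score)
    names = {(True, True): "early & sticky", (True, False): "early, not sticky",
             (False, True): "sticky but late", (False, False): "neither"}
    return [names[(bool(e), bool(s))] for e, s in zip(early, sticky)]

rng = np.random.default_rng(0)
dwell = rng.gamma(2.0, 1.0, size=(40, 5))      # 40 trials by 5 AOIs, simulated
ttff = rng.uniform(0.2, 2.0, size=5)           # mean time to first fixation, s
M = dominance_matrix(dwell)
copeland = M.sum(axis=1)                       # row sums as a Copeland-style score
print(quadrants(ttff, copeland))
-->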
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/63">

	<title>JEMR, Vol. 18, Pages 63: Effects of Multimodal AR-HUD Navigation Prompt Mode and Timing on Driving Behavior</title>
	<link>https://www.mdpi.com/1995-8692/18/6/63</link>
	<description>Current research on multimodal AR-HUD navigation systems primarily focuses on the presentation forms of auditory and visual information, yet the effects of synchrony between auditory and visual prompts as well as prompt timing on driving behavior and attention mechanisms remain insufficiently explored. This study employed a 2 (prompt mode: synchronous vs. asynchronous) × 3 (prompt timing: −2000 m, −1000 m, −500 m) within-subject experimental design to assess the impact of multimodal prompt synchrony and prompt distance on drivers’ reaction time, sustained attention, and eye movement behaviors, including average fixation duration and fixation count. Behavioral data demonstrated that both prompt mode and prompt timing significantly influenced drivers’ response performance (indexed by reaction time) and attention stability, with synchronous prompts at −1000 m yielding optimal performance. Eye-tracking results further revealed that synchronous prompts significantly enhanced fixation stability and reduced visual load, indicating more efficient information integration. Therefore, prompt mode and prompt timing significantly affect drivers’ perceptual processing and operational performance. Delivering synchronous auditory and visual prompts at −1000 m achieves an optimal balance between information timeliness and multimodal integration. This study recommends the following: (1) maintaining temporal consistency in multimodal prompts to facilitate perceptual integration and (2) controlling prompt distance within an intermediate range (−1000 m) to optimize the perception–action window, thereby improving the safety and efficiency of AR-HUD navigation systems.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 63: Effects of Multimodal AR-HUD Navigation Prompt Mode and Timing on Driving Behavior</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/63">doi: 10.3390/jemr18060063</a></p>
	<p>Authors:
		Qi Zhu
		Ziqi Liu
		Youlan Li
		Jung Euitay
		</p>
	<p>Current research on multimodal AR-HUD navigation systems primarily focuses on the presentation forms of auditory and visual information, yet the effects of synchrony between auditory and visual prompts as well as prompt timing on driving behavior and attention mechanisms remain insufficiently explored. This study employed a 2 (prompt mode: synchronous vs. asynchronous) × 3 (prompt timing: −2000 m, −1000 m, −500 m) within-subject experimental design to assess the impact of multimodal prompt synchrony and prompt distance on drivers’ reaction time, sustained attention, and eye movement behaviors, including average fixation duration and fixation count. Behavioral data demonstrated that both prompt mode and prompt timing significantly influenced drivers’ response performance (indexed by reaction time) and attention stability, with synchronous prompts at −1000 m yielding optimal performance. Eye-tracking results further revealed that synchronous prompts significantly enhanced fixation stability and reduced visual load, indicating more efficient information integration. Therefore, prompt mode and prompt timing significantly affect drivers’ perceptual processing and operational performance. Delivering synchronous auditory and visual prompts at −1000 m achieves an optimal balance between information timeliness and multimodal integration. This study recommends the following: (1) maintaining temporal consistency in multimodal prompts to facilitate perceptual integration and (2) controlling prompt distance within an intermediate range (−1000 m) to optimize the perception–action window, thereby improving the safety and efficiency of AR-HUD navigation systems.</p>
	]]></content:encoded>

	<dc:title>Effects of Multimodal AR-HUD Navigation Prompt Mode and Timing on Driving Behavior</dc:title>
			<dc:creator>Qi Zhu</dc:creator>
			<dc:creator>Ziqi Liu</dc:creator>
			<dc:creator>Youlan Li</dc:creator>
			<dc:creator>Jung Euitay</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060063</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>63</prism:startingPage>
		<prism:doi>10.3390/jemr18060063</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/63</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
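<!-- Illustrative sketch: the study above reports a 2 (prompt mode) by 3 (prompt timing) within-subject design on reaction time. The snippet shows how such a repeated-measures ANOVA can be run with statsmodels' AnovaRM; the data, column names, and effect sizes are simulated, not the paper's.

import numpy as np
import pandas as pd
from statsmodels.stats.anova import AnovaRM

rng = np.random.default_rng(1)
rows = []
for subj in range(20):
    base = rng.normal(900, 60)                        # per-driver baseline RT, ms
    for mode in ("sync", "async"):
        for dist in (-2000, -1000, -500):
            rt = base + (0 if mode == "sync" else 40)
            rt += {-2000: 30, -1000: 0, -500: 55}[dist]   # assume -1000 m fastest
            rows.append({"subject": subj, "mode": mode, "timing": dist,
                         "rt": rt + rng.normal(0, 25)})
df = pd.DataFrame(rows)

res = AnovaRM(df, depvar="rt", subject="subject",
              within=["mode", "timing"]).fit()
print(res.anova_table)    # F and p for mode, timing, and their interaction
-->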
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/62">

	<title>JEMR, Vol. 18, Pages 62: The Influence of Social Media-like Cues on Visual Attention&amp;mdash;An Eye-Tracking Study with Food Products</title>
	<link>https://www.mdpi.com/1995-8692/18/6/62</link>
	<description>Social media has developed into a leading advertising platform, with Instagram likes serving as visual cues that may influence consumer perception and behavior. The present study investigated the effect of Instagram likes on visual attention, memory, and food evaluations focusing on traditional Greek food posts, using eye-tracking technology. The study assessed whether a higher number of likes increased attention to the food area, enhanced memory recall of food names, and influenced subjective ratings (liking, perceived tastiness, and intention to taste). The results demonstrated no significant differences in overall viewing time, memory performance, or evaluation ratings between high-like and low-like conditions. Although not statistically significant, descriptive trends suggested that posts with a higher number of likes tended to be evaluated more positively and that the likes AOI (Area of Interest) tended to attract more visual attention. The observed trends point to a possible subtle role of likes in users’ engagement with food posts, influencing how they process and evaluate such content. These findings add to the discussion about the effect of social media likes on information processing when individuals observe food pictures on social media.</description>
	<pubDate>2025-11-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 62: The Influence of Social Media-like Cues on Visual Attention&mdash;An Eye-Tracking Study with Food Products</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/62">doi: 10.3390/jemr18060062</a></p>
	<p>Authors:
		Maria Mamalikou
		Konstantinos Gkatzionis
		Malamatenia Panagiotou
		</p>
	<p>Social media has developed into a leading advertising platform, with Instagram likes serving as visual cues that may influence consumer perception and behavior. The present study investigated the effect of Instagram likes on visual attention, memory, and food evaluations focusing on traditional Greek food posts, using eye-tracking technology. The study assessed whether a higher number of likes increased attention to the food area, enhanced memory recall of food names, and influenced subjective ratings (liking, perceived tastiness, and intention to taste). The results demonstrated no significant differences in overall viewing time, memory performance, or evaluation ratings between high-like and low-like conditions. Although not statistically significant, descriptive trends suggested that posts with a higher number of likes tended to be evaluated more positively and that the likes AOI (Area of Interest) tended to attract more visual attention. The observed trends point to a possible subtle role of likes in users’ engagement with food posts, influencing how they process and evaluate such content. These findings add to the discussion about the effect of social media likes on information processing when individuals observe food pictures on social media.</p>
	]]></content:encoded>

	<dc:title>The Influence of Social Media-like Cues on Visual Attention&amp;mdash;An Eye-Tracking Study with Food Products</dc:title>
			<dc:creator>Maria Mamalikou</dc:creator>
			<dc:creator>Konstantinos Gkatzionis</dc:creator>
			<dc:creator>Malamatenia Panagiotou</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060062</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>62</prism:startingPage>
		<prism:doi>10.3390/jemr18060062</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/62</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/61">

	<title>JEMR, Vol. 18, Pages 61: AI Images vs. Real Photographs: Investigating Visual Recognition and Perception</title>
	<link>https://www.mdpi.com/1995-8692/18/6/61</link>
	<description>Recently, the photorealism of generated images has improved noticeably due to the development of AI algorithms. These are high-resolution images of human faces and bodies, cats and dogs, vehicles, and other categories of objects that the untrained eye cannot distinguish from authentic photographs. The study assessed how people perceive 12 pictures generated by AI vs. 12 real photographs. Six main categories of stimuli were selected: architecture, art, faces, cars, landscapes, and pets. The visual perception of the selected images was studied by means of eye tracking, using gaze patterns as well as timing characteristics, and compared with respect to respondents’ gender and knowledge of AI graphics. After the experiment, the study participants analysed the pictures again in order to describe the reasons for their choice. The results show that AI images of pets and real photographs of architecture were the easiest to identify. The largest differences in visual perception are between men and women as well as between those experienced in digital graphics (including AI images) and the rest. Based on the analysis, several recommendations are suggested for AI developers and end-users.</description>
	<pubDate>2025-11-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 61: AI Images vs. Real Photographs: Investigating Visual Recognition and Perception</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/61">doi: 10.3390/jemr18060061</a></p>
	<p>Authors:
		Veslava Osińska
		Weronika Kortas
		Adam Szalach
		Marc Welter
		</p>
	<p>Recently, the photorealism of generated images has improved noticeably due to the development of AI algorithms. These are high-resolution images of human faces and bodies, cats and dogs, vehicles, and other categories of objects that the untrained eye cannot distinguish from authentic photographs. The study assessed how people perceive 12 pictures generated by AI vs. 12 real photographs. Six main categories of stimuli were selected: architecture, art, faces, cars, landscapes, and pets. The visual perception of the selected images was studied by means of eye tracking, using gaze patterns as well as timing characteristics, and compared with respect to respondents’ gender and knowledge of AI graphics. After the experiment, the study participants analysed the pictures again in order to describe the reasons for their choice. The results show that AI images of pets and real photographs of architecture were the easiest to identify. The largest differences in visual perception are between men and women as well as between those experienced in digital graphics (including AI images) and the rest. Based on the analysis, several recommendations are suggested for AI developers and end-users.</p>
	]]></content:encoded>

	<dc:title>AI Images vs. Real Photographs: Investigating Visual Recognition and Perception</dc:title>
			<dc:creator>Veslava Osińska</dc:creator>
			<dc:creator>Weronika Kortas</dc:creator>
			<dc:creator>Adam Szalach</dc:creator>
			<dc:creator>Marc Welter</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060061</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-03</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>61</prism:startingPage>
		<prism:doi>10.3390/jemr18060061</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/61</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/6/60">

	<title>JEMR, Vol. 18, Pages 60: The Influence of Text Genre on Eye Movement Patterns During Reading</title>
	<link>https://www.mdpi.com/1995-8692/18/6/60</link>
	<description>Successful reading comprehension depends on many factors, including text genre. Eye-tracking studies indicate that genre shapes eye movement patterns at a local level. Although the reading of expository and narrative texts by adolescents has been described in the literature, the reading of poetry by adolescents remains understudied. In this study, we used scanpath analysis to examine how genre and comprehension level influence global eye movement strategies in adolescents (N = 44). Thus, the novelty of this study lies in the use of scanpath analysis to measure global eye movement strategies employed by adolescents while reading narrative, expository, and poetic texts. Two distinct reading patterns emerged: a forward reading pattern (linear progression) and a regressive reading pattern (frequent lookbacks). Readers tended to use regressive patterns more often with expository and poetic texts, while forward patterns were more common with a narrative text. Comprehension level also played a significant role, with readers with a higher level of comprehension relying more on regressive patterns for expository and poetic texts. The results of this experiment suggest that scanpaths effectively capture genre-driven differences in reading strategies, underscoring how genre expectations may shape visual processing during reading.</description>
	<pubDate>2025-11-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 60: The Influence of Text Genre on Eye Movement Patterns During Reading</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/6/60">doi: 10.3390/jemr18060060</a></p>
	<p>Authors:
		Maksim Markevich
		Anastasiia Streltsova
		</p>
	<p>Successful reading comprehension depends on many factors, including text genre. Eye-tracking studies indicate that genre shapes eye movement patterns at a local level. Although the reading of expository and narrative texts by adolescents has been described in the literature, the reading of poetry by adolescents remains understudied. In this study, we used scanpath analysis to examine how genre and comprehension level influence global eye movement strategies in adolescents (N = 44). Thus, the novelty of this study lies in the use of scanpath analysis to measure global eye movement strategies employed by adolescents while reading narrative, expository, and poetic texts. Two distinct reading patterns emerged: a forward reading pattern (linear progression) and a regressive reading pattern (frequent lookbacks). Readers tended to use regressive patterns more often with expository and poetic texts, while forward patterns were more common with a narrative text. Comprehension level also played a significant role, with readers with a higher level of comprehension relying more on regressive patterns for expository and poetic texts. The results of this experiment suggest that scanpaths effectively capture genre-driven differences in reading strategies, underscoring how genre expectations may shape visual processing during reading.</p>
	]]></content:encoded>

	<dc:title>The Influence of Text Genre on Eye Movement Patterns During Reading</dc:title>
			<dc:creator>Maksim Markevich</dc:creator>
			<dc:creator>Anastasiia Streltsova</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18060060</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-11-03</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-11-03</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>6</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>60</prism:startingPage>
		<prism:doi>10.3390/jemr18060060</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/6/60</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
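<!-- Illustrative sketch: the abstract above contrasts "forward" and "regressive" reading patterns. One simple way to operationalize that split is the share of inter-word regressions in a fixation sequence; the 0.25 cutoff below is an illustrative assumption, not a published threshold, and this is not the authors' scanpath method.

def regression_rate(word_indices):
    """word_indices: fixation sequence given as ordinal word positions."""
    moves = [b - a for a, b in zip(word_indices, word_indices[1:]) if b != a]
    if not moves:
        return 0.0
    return sum(m < 0 for m in moves) / len(moves)

def classify_scanpath(word_indices, threshold=0.25):
    return "regressive" if regression_rate(word_indices) > threshold else "forward"

print(classify_scanpath([0, 1, 2, 3, 4, 5, 6]))           # linear progression
print(classify_scanpath([0, 1, 2, 0, 3, 1, 4, 2, 5]))     # frequent lookbacks
-->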
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/59">

	<title>JEMR, Vol. 18, Pages 59: Sequential Fixation Behavior in Road Marking Recognition: Implications for Design</title>
	<link>https://www.mdpi.com/1995-8692/18/5/59</link>
	<description>This study examined how drivers’ eye fixations change before, during, and after recognizing road markings, and how these changes relate to driving speed, visual complexity, cognitive functions, and demographics. Twenty licensed drivers viewed on-board movies showing digit or character road markings while their eye movements were tracked. Fixation positions and dispersions were analyzed. Results showed that, regardless of marking type, fixations were horizontally dispersed before and after recognition but became vertically concentrated during recognition, with fixation points shifting higher (p &lt; 0.001) and horizontal dispersion decreasing (p = 0.01). During the recognition period, fixations moved upward and narrowed horizontally toward the final third (p = 0.034), suggesting increased focus. Longer fixations were linked to slower speeds for digits (p = 0.029) and more characters for character markings (p &lt; 0.001). No significant correlations were found with cognitive functions or demographics. These findings suggest that drivers first scan broadly, then concentrate on markings as they approach. For optimal recognition, simple or essential information should be placed centrally or lower, while detailed content should appear higher to align with natural gaze patterns. In high-speed environments, markings should prioritize clarity and brevity in central positions to ensure safe and rapid recognition.</description>
	<pubDate>2025-10-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 59: Sequential Fixation Behavior in Road Marking Recognition: Implications for Design</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/59">doi: 10.3390/jemr18050059</a></p>
	<p>Authors:
		Takaya Maeyama
		Hiroki Okada
		Daisuke Sawamura
		</p>
	<p>This study examined how drivers’ eye fixations change before, during, and after recognizing road markings, and how these changes relate to driving speed, visual complexity, cognitive functions, and demographics. Twenty licensed drivers viewed on-board movies showing digit or character road markings while their eye movements were tracked. Fixation positions and dispersions were analyzed. Results showed that, regardless of marking type, fixations were horizontally dispersed before and after recognition but became vertically concentrated during recognition, with fixation points shifting higher (p &lt; 0.001) and horizontal dispersion decreasing (p = 0.01). During the recognition period, fixations moved upward and narrowed horizontally toward the final third (p = 0.034), suggesting increased focus. Longer fixations were linked to slower speeds for digits (p = 0.029) and more characters for character markings (p &lt; 0.001). No significant correlations were found with cognitive functions or demographics. These findings suggest that drivers first scan broadly, then concentrate on markings as they approach. For optimal recognition, simple or essential information should be placed centrally or lower, while detailed content should appear higher to align with natural gaze patterns. In high-speed environments, markings should prioritize clarity and brevity in central positions to ensure safe and rapid recognition.</p>
	]]></content:encoded>

	<dc:title>Sequential Fixation Behavior in Road Marking Recognition: Implications for Design</dc:title>
			<dc:creator>Takaya Maeyama</dc:creator>
			<dc:creator>Hiroki Okada</dc:creator>
			<dc:creator>Daisuke Sawamura</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050059</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-21</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-21</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>59</prism:startingPage>
		<prism:doi>10.3390/jemr18050059</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/59</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
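<!-- Illustrative sketch: the study above analyzes fixation positions and dispersions per phase (before / during / after recognition). A minimal version of that bookkeeping, assuming SD-based dispersion of x/y coordinates; phase labels and data are simulated, not the paper's.

import numpy as np

def phase_dispersion(fix_xy, phase_labels):
    """fix_xy: (n, 2) fixation coordinates; phase_labels: one label per row."""
    out = {}
    for phase in ("before", "during", "after"):
        pts = fix_xy[phase_labels == phase]
        out[phase] = {"mean_y": float(pts[:, 1].mean()),
                      "sd_x": float(pts[:, 0].std(ddof=1)),
                      "sd_y": float(pts[:, 1].std(ddof=1))}
    return out

rng = np.random.default_rng(2)
labels = np.array(["before"] * 30 + ["during"] * 30 + ["after"] * 30)
xy = np.vstack([rng.normal([0, 0], [3.0, 1.0], (30, 2)),   # broad horizontal scan
                rng.normal([0, 1], [1.0, 1.5], (30, 2)),   # narrower, higher gaze
                rng.normal([0, 0], [3.0, 1.0], (30, 2))])
for phase, stats in phase_dispersion(xy, labels).items():
    print(phase, stats)
-->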
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/58">

	<title>JEMR, Vol. 18, Pages 58: Oculomotor Behavior of L2 Readers with Typologically Distant L1 Background: The “Big Three” Effects of Word Length, Frequency, and Predictability</title>
	<link>https://www.mdpi.com/1995-8692/18/5/58</link>
	<description>Oculomotor reading behavior is influenced by both universal factors, like the “big three” of word length, frequency, and contextual predictability, and language-specific factors, such as script and grammar. The aim of this study was to examine the influence of the “big three” factors on L2 reading focusing on a typologically distant L1/L2 pair with dramatic differences in script and grammar. A total of 41 native Chinese-speaking learners of Russian (levels A2–B2) and 40 native Russian speakers read a corpus of 90 Russian sentences for comprehension. Their eye movements were recorded with EyeLink 1000+. We analyzed both early (gaze duration and skipping rate) and late (regression rate and rereading time) eye movement measures. As expected, the “big three” effects influenced oculomotor behavior in both L1 and L2 readers, being more pronounced for L2, but substantial differences were also revealed. Word frequency in L1 reading primarily influenced early processing stages, whereas in L2 reading it remained significant in later stages as well. Predictability had an immediate effect on skipping rates in L1 reading, whereas L2 readers exhibited it only in late measures. Word length was the only factor that interacted with L2 language exposure, demonstrating adjustment to the alphabetic script and polymorphemic word structure. Our findings provide new insights into the processing challenges of L2 readers with typologically distant L1 backgrounds.</description>
	<pubDate>2025-10-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 58: Oculomotor Behavior of L2 Readers with Typologically Distant L1 Background: The “Big Three” Effects of Word Length, Frequency, and Predictability</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/58">doi: 10.3390/jemr18050058</a></p>
	<p>Authors:
		Marina Norkina
		Daria Chernova
		Svetlana Alexeeva
		Maria Harchevnik
		</p>
	<p>Oculomotor reading behavior is influenced by both universal factors, like the “big three” of word length, frequency, and contextual predictability, and language-specific factors, such as script and grammar. The aim of this study was to examine the influence of the “big three” factors on L2 reading focusing on a typologically distant L1/L2 pair with dramatic differences in script and grammar. A total of 41 native Chinese-speaking learners of Russian (levels A2–B2) and 40 native Russian speakers read a corpus of 90 Russian sentences for comprehension. Their eye movements were recorded with EyeLink 1000+. We analyzed both early (gaze duration and skipping rate) and late (regression rate and rereading time) eye movement measures. As expected, the “big three” effects influenced oculomotor behavior in both L1 and L2 readers, being more pronounced for L2, but substantial differences were also revealed. Word frequency in L1 reading primarily influenced early processing stages, whereas in L2 reading it remained significant in later stages as well. Predictability had an immediate effect on skipping rates in L1 reading, whereas L2 readers exhibited it only in late measures. Word length was the only factor that interacted with L2 language exposure, demonstrating adjustment to the alphabetic script and polymorphemic word structure. Our findings provide new insights into the processing challenges of L2 readers with typologically distant L1 backgrounds.</p>
	]]></content:encoded>

	<dc:title>Oculomotor Behavior of L2 Readers with Typologically Distant L1 Background: The “Big Three” Effects of Word Length, Frequency, and Predictability</dc:title>
			<dc:creator>Marina Norkina</dc:creator>
			<dc:creator>Daria Chernova</dc:creator>
			<dc:creator>Svetlana Alexeeva</dc:creator>
			<dc:creator>Maria Harchevnik</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050058</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-18</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-18</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>58</prism:startingPage>
		<prism:doi>10.3390/jemr18050058</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/58</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
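<!-- Illustrative sketch: "big three" effects on gaze duration are commonly tested with a mixed-effects model (random intercepts by participant). The statsmodels formula below is one standard setup, not the authors' exact model; predictor names, coefficients, and data are simulated.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(3)
rows = []
for s in range(30):                       # participants
    subj_int = rng.normal(0, 30)          # random intercept, ms
    for w in range(60):                   # words
        length = int(rng.integers(2, 12))
        logfreq = rng.normal(3, 1)
        pred = rng.uniform(0, 1)
        gaze = 220 + subj_int + 8 * length - 15 * logfreq - 40 * pred
        rows.append({"subj": s, "gaze": gaze + rng.normal(0, 40),
                     "length": length, "logfreq": logfreq, "pred": pred})
df = pd.DataFrame(rows)

model = smf.mixedlm("gaze ~ length + logfreq + pred", df, groups=df["subj"])
print(model.fit().summary())
-->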
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/57">

	<title>JEMR, Vol. 18, Pages 57: Visual Strategies for Guiding Gaze Sequences and Attention in Yi Symbols: Eye-Tracking Insights</title>
	<link>https://www.mdpi.com/1995-8692/18/5/57</link>
	<description>This study investigated the effectiveness of visual strategies in guiding gaze behavior and attention on Yi graphic symbols using eye-tracking. Four strategies (color brightness, layering, line guidance, and size variation) were tested with 34 Thai participants unfamiliar with Yi symbol meanings. Gaze sequence analysis, using Levenshtein distance and similarity ratio, showed that bright colors, layered arrangements, and connected lines enhanced alignment with intended gaze sequences, while size variation had minimal effect. Bright red symbols and lines captured faster initial fixations (Time to First Fixation, TTFF) on key Areas of Interest (AOIs), unlike layering and size. Lines reduced dwell time at sequence starts, promoting efficient progression, while larger symbols sustained longer attention, though inconsistently. Color and layering showed no consistent dwell time effects. These findings inform Yi graphic symbol design for effective cross-cultural visual communication.</description>
	<pubDate>2025-10-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 57: Visual Strategies for Guiding Gaze Sequences and Attention in Yi Symbols: Eye-Tracking Insights</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/57">doi: 10.3390/jemr18050057</a></p>
	<p>Authors:
		Bo Yuan
		Sakol Teeravarunyou
		</p>
	<p>This study investigated the effectiveness of visual strategies in guiding gaze behavior and attention on Yi graphic symbols using eye-tracking. Four strategies (color brightness, layering, line guidance, and size variation) were tested with 34 Thai participants unfamiliar with Yi symbol meanings. Gaze sequence analysis, using Levenshtein distance and similarity ratio, showed that bright colors, layered arrangements, and connected lines enhanced alignment with intended gaze sequences, while size variation had minimal effect. Bright red symbols and lines captured faster initial fixations (Time to First Fixation, TTFF) on key Areas of Interest (AOIs), unlike layering and size. Lines reduced dwell time at sequence starts, promoting efficient progression, while larger symbols sustained longer attention, though inconsistently. Color and layering showed no consistent dwell time effects. These findings inform Yi graphic symbol design for effective cross-cultural visual communication.</p>
	]]></content:encoded>

	<dc:title>Visual Strategies for Guiding Gaze Sequences and Attention in Yi Symbols: Eye-Tracking Insights</dc:title>
			<dc:creator>Bo Yuan</dc:creator>
			<dc:creator>Sakol Teeravarunyou</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050057</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-16</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-16</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>57</prism:startingPage>
		<prism:doi>10.3390/jemr18050057</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/57</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
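<!-- Illustrative sketch: the abstract above compares observed gaze sequences to intended ones via Levenshtein distance and a similarity ratio. A minimal implementation under the usual definitions; the AOI sequences are invented, and the normalization by max length is an assumption about how the ratio was formed.

def levenshtein(a, b):
    """Classic dynamic-programming edit distance between two sequences."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        cur = [i]
        for j, cb in enumerate(b, start=1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

def similarity_ratio(a, b):
    """1.0 means identical visit order; 0.0 means maximally different."""
    if not a and not b:
        return 1.0
    return 1.0 - levenshtein(a, b) / max(len(a), len(b))

intended = ["A", "B", "C", "D"]
observed = ["A", "C", "B", "D", "D"]
print(levenshtein(observed, intended),
      round(similarity_ratio(observed, intended), 2))
-->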
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/56">

	<title>JEMR, Vol. 18, Pages 56: DyslexiaNet: Examining the Viability and Efficacy of Eye Movement-Based Deep Learning for Dyslexia Detection</title>
	<link>https://www.mdpi.com/1995-8692/18/5/56</link>
	<description>Dyslexia is a neurodevelopmental disorder that impairs reading, affecting 5–17.5% of children and representing the most common learning disability. Individuals with dyslexia experience decoding, reading fluency, and comprehension difficulties, hindering vocabulary development and learning. Early and accurate identification is essential for targeted interventions. Traditional diagnostic methods rely on behavioral assessments and neuropsychological tests, which can be time-consuming and subjective. Recent studies suggest that physiological signals, such as electrooculography (EOG), can provide objective insights into reading-related cognitive and visual processes. Despite this potential, there is limited research on how typeface and font characteristics influence reading performance in dyslexic children using EOG measurements. To address this gap, we investigated the most suitable typefaces for Turkish-speaking children with dyslexia by analyzing EOG signals recorded during reading tasks. We developed a novel deep learning framework, DyslexiaNet, using scalogram images from horizontal and vertical EOG channels, and compared it with AlexNet, MobileNet, and ResNet. Reading performance indicators, including reading time, blink rate, regression rate, and EOG signal energy, were evaluated across multiple typefaces and font sizes. Results showed that typeface significantly affects reading efficiency in dyslexic children. The BonvenoCF font was associated with shorter reading times, fewer regressions, and lower cognitive load. DyslexiaNet achieved the highest classification accuracy (99.96% for horizontal channels) while requiring lower computational load than other networks. These findings demonstrate that EOG-based physiological measurements combined with deep learning offer a non-invasive, objective approach for dyslexia detection and personalized typeface selection. This method can provide practical guidance for designing educational materials and support clinicians in early diagnosis and individualized intervention strategies for children with dyslexia.</description>
	<pubDate>2025-10-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 56: DyslexiaNet: Examining the Viability and Efficacy of Eye Movement-Based Deep Learning for Dyslexia Detection</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/56">doi: 10.3390/jemr18050056</a></p>
	<p>Authors:
		Ramis İleri
		Çiğdem Altıntop
		Fatma Latifoğlu
		Esra Demirci
		</p>
	<p>Dyslexia is a neurodevelopmental disorder that impairs reading, affecting 5–17.5% of children and representing the most common learning disability. Individuals with dyslexia experience decoding, reading fluency, and comprehension difficulties, hindering vocabulary development and learning. Early and accurate identification is essential for targeted interventions. Traditional diagnostic methods rely on behavioral assessments and neuropsychological tests, which can be time-consuming and subjective. Recent studies suggest that physiological signals, such as electrooculography (EOG), can provide objective insights into reading-related cognitive and visual processes. Despite this potential, there is limited research on how typeface and font characteristics influence reading performance in dyslexic children using EOG measurements. To address this gap, we investigated the most suitable typefaces for Turkish-speaking children with dyslexia by analyzing EOG signals recorded during reading tasks. We developed a novel deep learning framework, DyslexiaNet, using scalogram images from horizontal and vertical EOG channels, and compared it with AlexNet, MobileNet, and ResNet. Reading performance indicators, including reading time, blink rate, regression rate, and EOG signal energy, were evaluated across multiple typefaces and font sizes. Results showed that typeface significantly affects reading efficiency in dyslexic children. The BonvenoCF font was associated with shorter reading times, fewer regressions, and lower cognitive load. DyslexiaNet achieved the highest classification accuracy (99.96% for horizontal channels) while requiring lower computational load than other networks. These findings demonstrate that EOG-based physiological measurements combined with deep learning offer a non-invasive, objective approach for dyslexia detection and personalized typeface selection. This method can provide practical guidance for designing educational materials and support clinicians in early diagnosis and individualized intervention strategies for children with dyslexia.</p>
	]]></content:encoded>

	<dc:title>DyslexiaNet: Examining the Viability and Efficacy of Eye Movement-Based Deep Learning for Dyslexia Detection</dc:title>
			<dc:creator>Ramis İleri</dc:creator>
			<dc:creator>Çiğdem Altıntop</dc:creator>
			<dc:creator>Fatma Latifoğlu</dc:creator>
			<dc:creator>Esra Demirci</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050056</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-15</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-15</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>56</prism:startingPage>
		<prism:doi>10.3390/jemr18050056</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/56</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
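<!-- Illustrative sketch: the pipeline above builds scalogram images from EOG channels. The snippet shows one way to do that with PyWavelets' continuous wavelet transform; the Morlet wavelet, scale range, sampling rate, and toy signal are assumptions, and the paper's exact preprocessing is not reproduced.

import numpy as np
import pywt

fs = 200                                    # assumed EOG sampling rate, Hz
t = np.arange(0, 4, 1 / fs)
# Toy horizontal-EOG-like trace: a slow drift plus a saccade-like step.
sig = 0.3 * np.sin(2 * np.pi * 0.5 * t) + np.where(t > 2, 1.0, 0.0)
sig += 0.05 * np.random.default_rng(4).standard_normal(t.size)

scales = np.arange(1, 65)
coef, freqs = pywt.cwt(sig, scales, "morl", sampling_period=1 / fs)
scalogram = np.abs(coef)                    # (n_scales, n_samples) image for the CNN
print(scalogram.shape, freqs[:3])
-->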
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/55">

	<title>JEMR, Vol. 18, Pages 55: Head and Eye Movements During Pedestrian Crossing in Patients with Visual Impairment: A Virtual Reality Eye Tracking Study</title>
	<link>https://www.mdpi.com/1995-8692/18/5/55</link>
	<description>Real-world navigation depends on coordinated head–eye behaviour that standard tests of visual function miss. We investigated how visual impairment affects traffic navigation, whether behaviour differs by visual impairment type, and whether this functional grouping better explains performance than WHO categorisation. Using a virtual reality (VR) headset with integrated head and eye tracking, we evaluated detection of moving cars and safe road-crossing opportunities in 40 patients with central, peripheral, or combined visual impairment and 19 controls. Only two patients with a combination of very low visual acuity and severely constricted visual fields failed both visual tasks. Overall, patients identified safe-crossing intervals 1.3–1.5 s later than controls (p ≤ 0.01). Head–eye movement profiles diverged by visual impairment: patients with central impairment showed shorter, more frequent saccades (p &lt; 0.05); patients with peripheral impairment showed exploratory behaviour similar to controls; while patients with combined impairment executed fewer microsaccades (p &lt; 0.05), showed reduced total macrosaccade amplitude (p &lt; 0.05), and made fewer head turns (p &lt; 0.05). Classification by impairment type explained behaviour better than WHO categorisation. These findings challenge acuity/field-based classifications and support integrating functional metrics into risk stratification and targeted rehabilitation, with VR providing a safe, scalable assessment tool.</description>
	<pubDate>2025-10-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 55: Head and Eye Movements During Pedestrian Crossing in Patients with Visual Impairment: A Virtual Reality Eye Tracking Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/55">doi: 10.3390/jemr18050055</a></p>
	<p>Authors:
		Mark Mervic
		Ema Grašič
		Polona Jaki Mekjavić
		Nataša Vidovič Valentinčič
		Ana Fakin
		</p>
	<p>Real-world navigation depends on coordinated head–eye behaviour that standard tests of visual function miss. We investigated how visual impairment affects traffic navigation, whether behaviour differs by visual impairment type, and whether this functional grouping better explains performance than WHO categorisation. Using a virtual reality (VR) headset with integrated head and eye tracking, we evaluated detection of moving cars and safe road-crossing opportunities in 40 patients with central, peripheral, or combined visual impairment and 19 controls. Only two patients with a combination of very low visual acuity and severely constricted visual fields failed both visual tasks. Overall, patients identified safe-crossing intervals 1.3–1.5 s later than controls (p ≤ 0.01). Head–eye movement profiles diverged by visual impairment: patients with central impairment showed shorter, more frequent saccades (p &lt; 0.05); patients with peripheral impairment showed exploratory behaviour similar to controls; while patients with combined impairment executed fewer microsaccades (p &lt; 0.05), showed reduced total macrosaccade amplitude (p &lt; 0.05), and made fewer head turns (p &lt; 0.05). Classification by impairment type explained behaviour better than WHO categorisation. These findings challenge acuity/field-based classifications and support integrating functional metrics into risk stratification and targeted rehabilitation, with VR providing a safe, scalable assessment tool.</p>
	]]></content:encoded>

	<dc:title>Head and Eye Movements During Pedestrian Crossing in Patients with Visual Impairment: A Virtual Reality Eye Tracking Study</dc:title>
			<dc:creator>Mark Mervic</dc:creator>
			<dc:creator>Ema Grašič</dc:creator>
			<dc:creator>Polona Jaki Mekjavić</dc:creator>
			<dc:creator>Nataša Vidovič Valentinčič</dc:creator>
			<dc:creator>Ana Fakin</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050055</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-15</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-15</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>55</prism:startingPage>
		<prism:doi>10.3390/jemr18050055</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/55</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
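<!-- Illustrative sketch: saccade counts and amplitudes like those above are often derived with a velocity-threshold (I-VT) detector. A minimal version; the 30 deg/s threshold, 120 Hz rate, and injected example saccade are common but assumed values, not the study's parameters.

import numpy as np

def detect_saccades(gaze_deg, fs, vel_threshold=30.0):
    """gaze_deg: (n, 2) gaze angles in degrees. Returns (onset, offset) sample pairs."""
    vel = np.linalg.norm(np.diff(gaze_deg, axis=0), axis=1) * fs   # deg/s
    fast = vel > vel_threshold
    padded = np.r_[False, fast, False]
    edges = np.flatnonzero(np.diff(padded.astype(int)))
    return list(zip(edges[::2], edges[1::2] - 1))

fs = 120
rng = np.random.default_rng(5)
gaze = np.cumsum(rng.normal(0, 0.02, (240, 2)), axis=0)   # fixational drift
gaze[100:106, 0] += np.linspace(0, 4.0, 6)                # ramp of an injected saccade
gaze[106:, 0] += 4.0                                      # landing at a new position
for on, off in detect_saccades(gaze, fs):
    amp = np.linalg.norm(gaze[off + 1] - gaze[on])
    print(f"saccade samples {on} to {off}, amplitude {amp:.2f} deg")
-->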
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/54">

	<title>JEMR, Vol. 18, Pages 54: Test–Retest Reliability of a Computerized Hand–Eye Coordination Task</title>
	<link>https://www.mdpi.com/1995-8692/18/5/54</link>
	<description>Background: Hand–eye coordination is essential for daily functioning and sports performance, but standardized digital protocols for its reliable assessment are limited. This study aimed to evaluate the intra-examiner repeatability and inter-examiner reproducibility of a computerized protocol (COI-SV®) for assessing hand–eye coordination in healthy adults, as well as the influence of age and sex. Methods: Seventy-eight adults completed four sessions of a computerized visual–motor task requiring rapid and accurate responses to randomly presented targets. Accuracy and response times were analyzed using repeated-measures and reliability analyses. Results: Accuracy showed a small session effect and minor examiner differences on the first day, whereas response times were consistent across sessions. Men generally responded faster than women, and response times increased slightly with age. Overall, reliability indices indicated moderate-to-good repeatability and reproducibility for both accuracy and response time measures. Conclusions: The COI-SV® protocol provides a robust, objective, and reproducible measurement of hand–eye coordination, supporting its use in clinical, sports, and research settings.</description>
	<pubDate>2025-10-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 54: Test–Retest Reliability of a Computerized Hand–Eye Coordination Task</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/54">doi: 10.3390/jemr18050054</a></p>
	<p>Authors:
		Antonio Ríder-Vázquez
		Estanislao Gutiérrez-Sánchez
		Clara Martinez-Perez
		María Sánchez-González
		</p>
	<p>Background: Hand–eye coordination is essential for daily functioning and sports performance, but standardized digital protocols for its reliable assessment are limited. This study aimed to evaluate the intra-examiner repeatability and inter-examiner reproducibility of a computerized protocol (COI-SV®) for assessing hand–eye coordination in healthy adults, as well as the influence of age and sex. Methods: Seventy-eight adults completed four sessions of a computerized visual–motor task requiring rapid and accurate responses to randomly presented targets. Accuracy and response times were analyzed using repeated-measures and reliability analyses. Results: Accuracy showed a small session effect and minor examiner differences on the first day, whereas response times were consistent across sessions. Men generally responded faster than women, and response times increased slightly with age. Overall, reliability indices indicated moderate-to-good repeatability and reproducibility for both accuracy and response time measures. Conclusions: The COI-SV® protocol provides a robust, objective, and reproducible measurement of hand–eye coordination, supporting its use in clinical, sports, and research settings.</p>
	]]></content:encoded>

	<dc:title>Test–Retest Reliability of a Computerized Hand–Eye Coordination Task</dc:title>
			<dc:creator>Antonio Ríder-Vázquez</dc:creator>
			<dc:creator>Estanislao Gutiérrez-Sánchez</dc:creator>
			<dc:creator>Clara Martinez-Perez</dc:creator>
			<dc:creator>María Sánchez-González</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050054</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-14</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-14</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>54</prism:startingPage>
		<prism:doi>10.3390/jemr18050054</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/54</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
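<!-- Illustrative sketch: repeatability/reproducibility studies like the one above typically report intraclass correlations across repeated sessions. The snippet uses the pingouin package on simulated response-time data (78 subjects by 4 sessions, matching the design's shape); all values are invented.

import numpy as np
import pandas as pd
import pingouin as pg

rng = np.random.default_rng(6)
n_subj, n_sessions = 78, 4
true_rt = rng.normal(500, 50, n_subj)                  # stable per-person level, ms
rows = [{"subject": s, "session": k, "rt": true_rt[s] + rng.normal(0, 20)}
        for s in range(n_subj) for k in range(n_sessions)]
df = pd.DataFrame(rows)

icc = pg.intraclass_corr(data=df, targets="subject", raters="session",
                         ratings="rt")
print(icc[["Type", "ICC", "CI95%"]])    # e.g., ICC2 for absolute agreement
-->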
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/53">

	<title>JEMR, Vol. 18, Pages 53: Recognition and Misclassification Patterns of Basic Emotional Facial Expressions: An Eye-Tracking Study in Young Healthy Adults</title>
	<link>https://www.mdpi.com/1995-8692/18/5/53</link>
	<description>Accurate recognition of basic facial emotions is well documented, yet the mechanisms of misclassification and their relation to gaze allocation remain under-reported. The present study utilized a within-subjects eye-tracking design to examine both accurate and inaccurate recognition of five basic emotions (anger, disgust, fear, happiness, and sadness) in healthy young adults. Fifty participants (twenty-four women) completed a forced-choice categorization task with 10 stimuli (female/male poser × emotion). A remote eye tracker (60 Hz) recorded fixations mapped to eyes, nose, and mouth areas of interest (AOIs). The analyses combined accuracy and decision-time statistics with heatmap comparisons of misclassified versus accurate trials within the same image. Overall accuracy was 87.8% (439/500). Misclassification patterns depended on the target emotion, but not on participant gender. The male fear expression was most often misclassified (typically as disgust), and the female sadness expression was frequently labeled as fear or disgust; disgust was the most incorrectly attributed response. For accurate trials, decision time showed main effects of emotion (p &lt; 0.001) and participant gender (p = 0.033): happiness was categorized fastest and anger slowest, and women responded faster overall, with particularly fast response times for sadness. The AOI results revealed strong main effects and an AOI × emotion interaction (p &lt; 0.001): eyes received the most fixations, but fear drew relatively more mouth sampling and sadness more nose sampling. Crucially, heatmaps showed an upper-face bias (eye AOI) in inaccurate trials, whereas accurate trials retained eye sampling and added nose and mouth AOI coverage, which aligned with diagnostic cues. These findings indicate that the scanpath strategy, in addition to information availability, underpins success and failure in basic-emotion recognition, with implications for theory, targeted training, and affective technologies.</description>
	<pubDate>2025-10-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 53: Recognition and Misclassification Patterns of Basic Emotional Facial Expressions: An Eye-Tracking Study in Young Healthy Adults</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/53">doi: 10.3390/jemr18050053</a></p>
	<p>Authors:
		Neşe Alkan
		</p>
	<p>Accurate recognition of basic facial emotions is well documented, yet the mechanisms of misclassification and their relation to gaze allocation remain under-reported. The present study utilized a within-subjects eye-tracking design to examine both accurate and inaccurate recognition of five basic emotions (anger, disgust, fear, happiness, and sadness) in healthy young adults. Fifty participants (twenty-four women) completed a forced-choice categorization task with 10 stimuli (female/male poser × emotion). A remote eye tracker (60 Hz) recorded fixations mapped to eyes, nose, and mouth areas of interest (AOIs). The analyses combined accuracy and decision-time statistics with heatmap comparisons of misclassified versus accurate trials within the same image. Overall accuracy was 87.8% (439/500). Misclassification patterns depended on the target emotion, but not on participant gender. The male fear expression was most often misclassified (typically as disgust), and the female sadness expression was frequently labeled as fear or disgust; disgust was the most incorrectly attributed response. For accurate trials, decision time showed main effects of emotion (p &lt; 0.001) and participant gender (p = 0.033): happiness was categorized fastest and anger slowest, and women responded faster overall, with particularly fast response times for sadness. The AOI results revealed strong main effects and an AOI × emotion interaction (p &lt; 0.001): eyes received the most fixations, but fear drew relatively more mouth sampling and sadness more nose sampling. Crucially, heatmaps showed an upper-face bias (eye AOI) in inaccurate trials, whereas accurate trials retained eye sampling and added nose and mouth AOI coverage, which aligned with diagnostic cues. These findings indicate that the scanpath strategy, in addition to information availability, underpins success and failure in basic-emotion recognition, with implications for theory, targeted training, and affective technologies.</p>
	]]></content:encoded>

	<dc:title>Recognition and Misclassification Patterns of Basic Emotional Facial Expressions: An Eye-Tracking Study in Young Healthy Adults</dc:title>
			<dc:creator>Neşe Alkan</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050053</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-11</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-11</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>53</prism:startingPage>
		<prism:doi>10.3390/jemr18050053</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/53</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
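<!-- Illustrative sketch: the AOI fixation counts above rest on mapping each fixation to an eyes/nose/mouth region. A minimal rectangular-AOI mapper; the box coordinates are made-up screen positions, not the stimuli's real geometry.

AOIS = {                        # name: (x_min, y_min, x_max, y_max), pixels
    "eyes":  (300, 200, 700, 320),
    "nose":  (430, 320, 570, 460),
    "mouth": (400, 460, 600, 560),
}

def label_fixation(x, y):
    for name, (x0, y0, x1, y1) in AOIS.items():
        if x0 <= x <= x1 and y0 <= y <= y1:
            return name
    return "outside"

def aoi_counts(fixations):
    counts = {name: 0 for name in (*AOIS, "outside")}
    for x, y in fixations:
        counts[label_fixation(x, y)] += 1
    return counts

print(aoi_counts([(500, 250), (505, 400), (480, 500), (50, 50)]))
-->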
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/52">

	<title>JEMR, Vol. 18, Pages 52: The Effect of Visual Attention Dispersion on Cognitive Response Time</title>
	<link>https://www.mdpi.com/1995-8692/18/5/52</link>
	<description>In safety-critical systems like nuclear power plants, the rapid and accurate perception of visual interface information is vital. This study investigates the relationship between visual attention dispersion measured via heatmap entropy (as a specific measure of gaze entropy) and response time during information search tasks. Sixteen participants viewed a prototype of an accident response support system and answered questions at three difficulty levels while their eye movements were tracked using Tobii Pro Glasses 2. Results showed a significant positive correlation (r = 0.595, p &lt; 0.01) between heatmap entropy and response time, indicating that more dispersed attention leads to longer task completion times. This pattern held consistently across all difficulty levels. These findings suggest that heatmap entropy is a useful metric for evaluating user attention strategies and can inform interface usability assessments in high-stakes environments.</description>
	<pubDate>2025-10-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 52: The Effect of Visual Attention Dispersion on Cognitive Response Time</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/52">doi: 10.3390/jemr18050052</a></p>
	<p>Authors:
		Yejin Lee
		Kwangtae Jung
		</p>
	<p>In safety-critical systems like nuclear power plants, the rapid and accurate perception of visual interface information is vital. This study investigates the relationship between visual attention dispersion measured via heatmap entropy (as a specific measure of gaze entropy) and response time during information search tasks. Sixteen participants viewed a prototype of an accident response support system and answered questions at three difficulty levels while their eye movements were tracked using Tobii Pro Glasses 2. Results showed a significant positive correlation (r = 0.595, p &lt; 0.01) between heatmap entropy and response time, indicating that more dispersed attention leads to longer task completion times. This pattern held consistently across all difficulty levels. These findings suggest that heatmap entropy is a useful metric for evaluating user attention strategies and can inform interface usability assessments in high-stakes environments.</p>
	]]></content:encoded>

	<dc:title>The Effect of Visual Attention Dispersion on Cognitive Response Time</dc:title>
			<dc:creator>Yejin Lee</dc:creator>
			<dc:creator>Kwangtae Jung</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050052</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-10</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-10</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/jemr18050052</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/52</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
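<!-- Illustrative sketch: one concrete reading of "heatmap entropy" is the Shannon entropy of a normalized 2D fixation histogram, which can then be correlated with response time. The grid size, screen extent, and data below are assumptions; the paper's exact binning is not reproduced.

import numpy as np
from scipy import stats

def heatmap_entropy(fix_xy, bins=16, extent=((0, 1920), (0, 1080))):
    hist, _, _ = np.histogram2d(fix_xy[:, 0], fix_xy[:, 1],
                                bins=bins, range=extent)
    p = hist.ravel() / hist.sum()
    p = p[p > 0]
    return float(-(p * np.log2(p)).sum())       # entropy in bits

rng = np.random.default_rng(7)
entropies, rts = [], []
for trial in range(16):
    spread = rng.uniform(40, 400)                # gaze more dispersed on some trials
    fix = rng.normal([960, 540], spread, (60, 2))
    entropies.append(heatmap_entropy(fix))
    rts.append(2.0 + 0.5 * entropies[-1] + rng.normal(0, 0.3))
r, p = stats.pearsonr(entropies, rts)
print(f"r = {r:.3f}, p = {p:.4f}")
-->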
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/51">

	<title>JEMR, Vol. 18, Pages 51: Diagnosing Colour Vision Deficiencies Using Eye Movements (Without Dedicated Eye-Tracking Hardware)</title>
	<link>https://www.mdpi.com/1995-8692/18/5/51</link>
	<description>Purpose: To investigate the efficacy of a novel test for diagnosing colour vision deficiencies using reflexive eye movements measured with an unmodified tablet. Methods: This study followed a cross-sectional design, where thirty-three participants aged between 17 and 65 years were recruited. The participant group comprised 23 controls, 8 deuteranopes, and 2 protanopes. An anomaloscope was employed to determine the colour vision status of these participants. The study methodology involved using an Apple iPad Pro’s built-in eye-tracking capabilities to record eye movements in response to coloured patterns drifting on the screen. Through an automated analysis of these movements, the researchers estimated individuals’ red–green equiluminant point and their equivalent luminance contrast. Results: Estimates of the red–green equiluminant point and the equivalent luminance contrast were used to classify participants’ colour vision status with a sensitivity rate of 90.0% and a specificity rate of 91.30%. Conclusions: The novel colour vision test administered using an unmodified tablet was found to be effective in diagnosing colour vision deficiencies and has the potential to be a practical and cost-effective alternative to traditional methods. Translational Relevance: The test’s objectivity, its straightforward implementation on a standard tablet, and its minimal requirement for patient cooperation all contribute to the wider accessibility of colour vision diagnosis. This is particularly advantageous for demographics like children who might be challenging to engage, but for whom early detection is of paramount importance.</description>
	<pubDate>2025-10-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 51: Diagnosing Colour Vision Deficiencies Using Eye Movements (Without Dedicated Eye-Tracking Hardware)</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/51">doi: 10.3390/jemr18050051</a></p>
	<p>Authors:
		Aryaman Taore
		Gabriel Lobo
		Philip R. K. Turnbull
		Steven C. Dakin
		</p>
	<p>Purpose: To investigate the efficacy of a novel test for diagnosing colour vision deficiencies using reflexive eye movements measured with an unmodified tablet. Methods: This study followed a cross-sectional design, where thirty-three participants aged between 17 and 65 years were recruited. The participant group comprised 23 controls, 8 deuteranopes, and 2 protanopes. An anomaloscope was employed to determine the colour vision status of these participants. The study methodology involved using an Apple iPad Pro’s built-in eye-tracking capabilities to record eye movements in response to coloured patterns drifting on the screen. Through an automated analysis of these movements, the researchers estimated individuals’ red–green equiluminant point and their equivalent luminance contrast. Results: Estimates of the red–green equiluminant point and the equivalent luminance contrast were used to classify participants’ colour vision status with a sensitivity rate of 90.0% and a specificity rate of 91.30%. Conclusions: The novel colour vision test administered using an unmodified tablet was found to be effective in diagnosing colour vision deficiencies and has the potential to be a practical and cost-effective alternative to traditional methods. Translational Relevance: The test’s objectivity, its straightforward implementation on a standard tablet, and its minimal requirement for patient cooperation all contribute to the wider accessibility of colour vision diagnosis. This is particularly advantageous for demographics like children who might be challenging to engage, but for whom early detection is of paramount importance.</p>
	]]></content:encoded>

	<dc:title>Diagnosing Colour Vision Deficiencies Using Eye Movements (Without Dedicated Eye-Tracking Hardware)</dc:title>
			<dc:creator>Aryaman Taore</dc:creator>
			<dc:creator>Gabriel Lobo</dc:creator>
			<dc:creator>Philip R. K. Turnbull</dc:creator>
			<dc:creator>Steven C. Dakin</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050051</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/jemr18050051</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/51</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/50">

	<title>JEMR, Vol. 18, Pages 50: Visual Attention to Economic Information in Simulated Ophthalmic Deficits: A Remote Eye-Tracking Study</title>
	<link>https://www.mdpi.com/1995-8692/18/5/50</link>
	<description>This study investigated how simulated ophthalmic visual field deficits affect visual attention and economic information processing. Using webcam-based eye tracking, 227 participants with normal vision recruited through Amazon Mechanical Turk were assigned to control, central vision loss, peripheral vision loss, or scattered vision loss simulation conditions. Participants viewed economic stimuli of varying complexity while eye movements, cognitive load, and comprehension were measured. All deficit conditions showed altered oculomotor behaviors. Central vision loss produced the most severe impairments: 43.6% longer fixation durations, 68% longer scanpaths, and comprehension accuracy of 61.2% versus 87.3% for controls. Visual deficits interacted with information complexity, showing accelerated impairment for complex stimuli. Mediation analysis revealed that 47% of comprehension deficits were mediated through altered attention patterns. Cognitive load was significantly elevated, with central vision loss participants reporting 84% higher mental demand than controls. These findings demonstrate that visual field deficits fundamentally alter economic information processing through both direct perceptual limitations and compensatory attention strategies. The results also demonstrate the feasibility of webcam-based eye tracking for studying simulated visual deficits and suggest that different types of simulated visual deficits may require distinct information presentation strategies.</description>
	<pubDate>2025-10-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 50: Visual Attention to Economic Information in Simulated Ophthalmic Deficits: A Remote Eye-Tracking Study</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/50">doi: 10.3390/jemr18050050</a></p>
	<p>Authors:
		Cansu Yuksel Elgin
		Ceyhun Elgin
		</p>
	<p>This study investigated how simulated ophthalmic visual field deficits affect visual attention and economic information processing. Using webcam-based eye tracking, 227 participants with normal vision recruited through Amazon Mechanical Turk were assigned to control, central vision loss, peripheral vision loss, or scattered vision loss simulation conditions. Participants viewed economic stimuli of varying complexity while eye movements, cognitive load, and comprehension were measured. All deficit conditions showed altered oculomotor behaviors. Central vision loss produced the most severe impairments: 43.6% longer fixation durations, 68% longer scanpaths, and comprehension accuracy of 61.2% versus 87.3% for controls. Visual deficits interacted with information complexity, showing accelerated impairment for complex stimuli. Mediation analysis revealed that 47% of comprehension deficits were mediated through altered attention patterns. Cognitive load was significantly elevated, with central vision loss participants reporting 84% higher mental demand than controls. These findings demonstrate that visual field deficits fundamentally alter economic information processing through both direct perceptual limitations and compensatory attention strategies. The results also demonstrate the feasibility of webcam-based eye tracking for studying simulated visual deficits and suggest that different types of simulated visual deficits may require distinct information presentation strategies.</p>
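	<p>The “proportion mediated” reported above can be illustrated with a standard product-of-coefficients calculation. The sketch below runs on synthetic data with assumed variable names (X = deficit condition, M = attention disruption, Y = comprehension); it is not the authors’ analysis.</p>
	<pre><code># Proportion-mediated sketch (product-of-coefficients, Baron-Kenny style),
# on synthetic data. Not the study's data or code.
import numpy as np

rng = np.random.default_rng(0)
n = 227
X = rng.integers(0, 2, n).astype(float)       # 0 = control, 1 = deficit
M = 1.0 * X + rng.normal(0, 1, n)             # attention mediator
Y = -0.8 * M - 0.9 * X + rng.normal(0, 1, n)  # comprehension outcome

def ols(y, *cols):
    """Least-squares coefficients (intercept first) for y ~ 1 + cols."""
    A = np.column_stack([np.ones(len(y)), *cols])
    return np.linalg.lstsq(A, y, rcond=None)[0]

a = ols(M, X)[1]          # effect of X on M
b = ols(Y, X, M)[2]       # effect of M on Y, controlling for X
c_total = ols(Y, X)[1]    # total effect of X on Y
prop_mediated = (a * b) / c_total
print(f"proportion mediated: {prop_mediated:.2f}")
</code></pre>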
	]]></content:encoded>

	<dc:title>Visual Attention to Economic Information in Simulated Ophthalmic Deficits: A Remote Eye-Tracking Study</dc:title>
			<dc:creator>Cansu Yuksel Elgin</dc:creator>
			<dc:creator>Ceyhun Elgin</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050050</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-02</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-02</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/jemr18050050</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/50</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/49">

	<title>JEMR, Vol. 18, Pages 49: Guiding the Gaze: How Bionic Reading Influences Eye Movements</title>
	<link>https://www.mdpi.com/1995-8692/18/5/49</link>
	<description>In recent years, Bionic reading has been introduced as a means to combat superficial reading and low comprehension rates. This paper investigates the eye movements of participants who read one passage in a standard font and an additional passage in Bionic font. It was found that Bionic font does not significantly change eye movements when reading. Fixation durations, number of fixations, and reading speeds were not significantly different between the two formats. Furthermore, fixations were spread throughout the word and not only on leading characters, even when using Bionic font; hence, participants were not able to “auto-complete” the words. Additionally, Bionic font did not facilitate easier processing of low-frequency or unfamiliar words. Overall, it would appear that Bionic font, in the short term, does not affect reading. Further investigation is needed to determine whether a long-term intervention with Bionic font is more meaningful than standard interventions.</description>
	<pubDate>2025-10-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 49: Guiding the Gaze: How Bionic Reading Influences Eye Movements</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/49">doi: 10.3390/jemr18050049</a></p>
	<p>Authors:
		T. R. Beelders
		</p>
	<p>In recent years, Bionic reading has been introduced as a means to combat superficial reading and low comprehension rates. This paper investigates the eye movements of participants who read one passage in a standard font and an additional passage in Bionic font. It was found that Bionic font does not significantly change eye movements when reading. Fixation durations, number of fixations, and reading speeds were not significantly different between the two formats. Furthermore, fixations were spread throughout the word and not only on leading characters, even when using Bionic font; hence, participants were not able to “auto-complete” the words. Additionally, Bionic font did not facilitate easier processing of low-frequency or unfamiliar words. Overall, it would appear that Bionic font, in the short term, does not affect reading. Further investigation is needed to determine whether a long-term intervention with Bionic font is more meaningful than standard interventions.</p>
	]]></content:encoded>

	<dc:title>Guiding the Gaze: How Bionic Reading Influences Eye Movements</dc:title>
			<dc:creator>T. R. Beelders</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050049</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-10-01</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-10-01</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/jemr18050049</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/49</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/48">

	<title>JEMR, Vol. 18, Pages 48: Tracking the Impact of Age and Dimensional Shifts on Situation Model Updating During Narrative Text Comprehension</title>
	<link>https://www.mdpi.com/1995-8692/18/5/48</link>
	<description>Studies on the relationship between age and situation model updating during narrative text reading have mainly used response or reading times. This study enhances previous measures (working memory, recognition probes, and comprehension) by incorporating eye-tracking techniques to compare situation model updating between young and older Chilean adults. The study included 82 participants (40 older adults and 42 young adults) who read two narrative texts under three conditions (no shift, spatial shift, and character shift) using a between-subject (age) and within-subject (dimensional change) design. The results show that, while differences in working memory capacity were observed between the groups, these differences did not impact situation model comprehension. Younger adults performed better in recognition tests regardless of updating conditions. Eye-tracking data showed increased fixation times for dimensional shifts and longer reading times in older adults, with no interaction between age and dimensional shifts.</description>
	<pubDate>2025-09-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 48: Tracking the Impact of Age and Dimensional Shifts on Situation Model Updating During Narrative Text Comprehension</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/48">doi: 10.3390/jemr18050048</a></p>
	<p>Authors:
		César Campos-Rojas
		Romualdo Ibáñez-Orellana
		</p>
	<p>Studies on the relationship between age and situation model updating during narrative text reading have mainly used response or reading times. This study enhances previous measures (working memory, recognition probes, and comprehension) by incorporating eye-tracking techniques to compare situation model updating between young and older Chilean adults. The study included 82 participants (40 older adults and 42 young adults) who read two narrative texts under three conditions (no shift, spatial shift, and character shift) using a between-subject (age) and within-subject (dimensional change) design. The results show that, while differences in working memory capacity were observed between the groups, these differences did not impact situation model comprehension. Younger adults performed better in recognition tests regardless of updating conditions. Eye-tracking data showed increased fixation times for dimensional shifts and longer reading times in older adults, with no interaction between age and dimensional shifts.</p>
	]]></content:encoded>

	<dc:title>Tracking the Impact of Age and Dimensional Shifts on Situation Model Updating During Narrative Text Comprehension</dc:title>
			<dc:creator>César Campos-Rojas</dc:creator>
			<dc:creator>Romualdo Ibáñez-Orellana</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050048</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-26</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-26</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/jemr18050048</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/48</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/47">

	<title>JEMR, Vol. 18, Pages 47: A Comprehensive Framework for Eye Tracking: Methods, Tools, Applications, and Cross-Platform Evaluation</title>
	<link>https://www.mdpi.com/1995-8692/18/5/47</link>
	<description>Eye tracking, a fundamental process in gaze analysis, involves measuring the point of gaze or eye motion. It is crucial in numerous applications, including human–computer interaction (HCI), education, health care, and virtual reality. This study delves into eye-tracking concepts, terminology, performance parameters, applications, and techniques, focusing on modern and efficient approaches such as video-oculography (VOG)-based systems, deep learning models for gaze estimation, wearable and cost-effective devices, and integration with virtual/augmented reality and assistive technologies. These contemporary methods, prevalent for over two decades, significantly contribute to developing cutting-edge eye-tracking applications. The findings underscore the significance of diverse eye-tracking techniques in advancing eye-tracking applications. They leverage machine learning to glean insights from existing data, enhance decision-making, and minimize the need for manual calibration during tracking. Furthermore, the study explores and recommends strategies to address limitations/challenges inherent in specific eye-tracking methods and applications. Finally, the study outlines future directions for leveraging eye tracking across various developed applications, highlighting its potential to continue evolving and enriching user experiences.</description>
	<pubDate>2025-09-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 47: A Comprehensive Framework for Eye Tracking: Methods, Tools, Applications, and Cross-Platform Evaluation</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/47">doi: 10.3390/jemr18050047</a></p>
	<p>Authors:
		Govind Ram Chhimpa
		Ajay Kumar
		Sunita Garhwal
		Dhiraj Kumar
		Niyaz Ahmad Wani
		Mudasir Ahmad Wani
		Kashish Ara Shakil
		</p>
	<p>Eye tracking, a fundamental process in gaze analysis, involves measuring the point of gaze or eye motion. It is crucial in numerous applications, including human–computer interaction (HCI), education, health care, and virtual reality. This study delves into eye-tracking concepts, terminology, performance parameters, applications, and techniques, focusing on modern and efficient approaches such as video-oculography (VOG)-based systems, deep learning models for gaze estimation, wearable and cost-effective devices, and integration with virtual/augmented reality and assistive technologies. These contemporary methods, prevalent for over two decades, significantly contribute to developing cutting-edge eye-tracking applications. The findings underscore the significance of diverse eye-tracking techniques in advancing eye-tracking applications. They leverage machine learning to glean insights from existing data, enhance decision-making, and minimize the need for manual calibration during tracking. Furthermore, the study explores and recommends strategies to address limitations/challenges inherent in specific eye-tracking methods and applications. Finally, the study outlines future directions for leveraging eye tracking across various developed applications, highlighting its potential to continue evolving and enriching user experiences.</p>
	]]></content:encoded>

	<dc:title>A Comprehensive Framework for Eye Tracking: Methods, Tools, Applications, and Cross-Platform Evaluation</dc:title>
			<dc:creator>Govind Ram Chhimpa</dc:creator>
			<dc:creator>Ajay Kumar</dc:creator>
			<dc:creator>Sunita Garhwal</dc:creator>
			<dc:creator>Dhiraj Kumar</dc:creator>
			<dc:creator>Niyaz Ahmad Wani</dc:creator>
			<dc:creator>Mudasir Ahmad Wani</dc:creator>
			<dc:creator>Kashish Ara Shakil</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050047</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-23</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-23</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/jemr18050047</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/47</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/46">

	<title>JEMR, Vol. 18, Pages 46: Microsaccade Activity During Visuospatial Working Memory in Early-Stage Parkinson’s Disease</title>
	<link>https://www.mdpi.com/1995-8692/18/5/46</link>
	<description>Fixational saccadic eye movements (microsaccades) have been associated with cognitive processes, especially in tasks requiring spatial attention and memory. Alterations in oculomotor and cognitive control are commonly observed in Parkinson’s disease (PD), though it is unclear to what extent microsaccade activity is affected. We acquired eye movement data from sixteen participants with early-stage PD and thirteen older healthy controls to examine the effects of dopamine modulation on microsaccade activity during the delay period of a spatial working memory task. Some microsaccade characteristics, such as amplitude and duration, were moderately larger in PD participants “on” their dopaminergic medication than in healthy controls or than when “off” medication, while PD participants exhibited microsaccades with a linear amplitude–velocity relationship comparable to controls. Both groups showed similar microsaccade rate patterns across task events, with most participants showing a horizontal bias in microsaccade direction during the delay period regardless of the remembered target location. Overall, our data suggest minimal involvement of microsaccades during visuospatial working memory maintenance under conditions without explicit attentional cues in both subject groups. However, moderate effects of PD-related dopamine deficiency were observed for microsaccade size during working memory maintenance.</description>
	<pubDate>2025-09-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 46: Microsaccade Activity During Visuospatial Working Memory in Early-Stage Parkinson’s Disease</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/46">doi: 10.3390/jemr18050046</a></p>
	<p>Authors:
		Katherine Farber
		Linjing Jiang
		Mario Michiels
		Ignacio Obeso
		Hoi-Chung Leung
		</p>
	<p>Fixational saccadic eye movements (microsaccades) have been associated with cognitive processes, especially in tasks requiring spatial attention and memory. Alterations in oculomotor and cognitive control are commonly observed in Parkinson’s disease (PD), though it is unclear to what extent microsaccade activity is affected. We acquired eye movement data from sixteen participants with early-stage PD and thirteen older healthy controls to examine the effects of dopamine modulation on microsaccade activity during the delay period of a spatial working memory task. Some microsaccade characteristics, such as amplitude and duration, were moderately larger in PD participants “on” their dopaminergic medication than in healthy controls or than when “off” medication, while PD participants exhibited microsaccades with a linear amplitude–velocity relationship comparable to controls. Both groups showed similar microsaccade rate patterns across task events, with most participants showing a horizontal bias in microsaccade direction during the delay period regardless of the remembered target location. Overall, our data suggest minimal involvement of microsaccades during visuospatial working memory maintenance under conditions without explicit attentional cues in both subject groups. However, moderate effects of PD-related dopamine deficiency were observed for microsaccade size during working memory maintenance.</p>
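	<p>The linear amplitude–velocity (“main sequence”) relationship mentioned above is commonly checked by regressing peak velocity on amplitude. A minimal sketch with synthetic microsaccade parameters, not the study’s recordings:</p>
	<pre><code># Main-sequence sketch: regress peak velocity on amplitude for detected
# microsaccades. Values are synthetic, not the study's data.
import numpy as np

rng = np.random.default_rng(1)
amplitude = rng.uniform(0.1, 1.0, 200)                     # degrees
peak_velocity = 55.0 * amplitude + rng.normal(0, 3, 200)   # deg/s, roughly linear

slope, intercept = np.polyfit(amplitude, peak_velocity, 1)
r = np.corrcoef(amplitude, peak_velocity)[0, 1]
print(f"slope {slope:.1f} deg/s per deg, r = {r:.3f}")
</code></pre>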
	]]></content:encoded>

	<dc:title>Microsaccade Activity During Visuospatial Working Memory in Early-Stage Parkinson’s Disease</dc:title>
			<dc:creator>Katherine Farber</dc:creator>
			<dc:creator>Linjing Jiang</dc:creator>
			<dc:creator>Mario Michiels</dc:creator>
			<dc:creator>Ignacio Obeso</dc:creator>
			<dc:creator>Hoi-Chung Leung</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050046</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-22</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-22</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/jemr18050046</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/46</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/45">

	<title>JEMR, Vol. 18, Pages 45: Active Gaze Guidance and Pupil Dilation Effects Through Subject Engagement in Ophthalmic Imaging</title>
	<link>https://www.mdpi.com/1995-8692/18/5/45</link>
	<description>Modern ophthalmic imaging methods such as optical coherence tomography (OCT) typically require expensive scanner components to direct the light beam across the retina while the patient’s gaze remains fixed. This proof-of-concept experiment investigates whether the patient’s natural eye movements can replace mechanical scanning by guiding the gaze along predefined patterns. An infrared fundus camera setup was used with nine healthy adults (aged 20–57) who completed tasks comparing passive viewing of moving patterns to actively tracing them using a touchpad interface. The active task involved participant-controlled target movement with real-time color feedback for accurate pattern tracing. Results showed that active tracing significantly increased pupil diameter by an average of 17.8% (range 8.9–43.6%; p &lt; 0.001) and reduced blink frequency compared to passive viewing. More complex patterns led to greater pupil dilation, confirming the link between cognitive load and physiological response. These findings demonstrate that patient-driven gaze guidance can stabilize gaze, reduce blinking, and naturally dilate the pupil. These conditions might enhance the quality of scannerless OCT or other imaging techniques benefiting from guided gaze and larger pupils. There could be benefits for children and people with compliance issues, although further research is needed to consider cognitive load.</description>
	<pubDate>2025-09-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 45: Active Gaze Guidance and Pupil Dilation Effects Through Subject Engagement in Ophthalmic Imaging</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/45">doi: 10.3390/jemr18050045</a></p>
	<p>Authors:
		David Harings
		Niklas Bauer
		Damian Mendroch
		Uwe Oberheide
		Holger Lubatschowski
		</p>
	<p>Modern ophthalmic imaging methods such as optical coherence tomography (OCT) typically require expensive scanner components to direct the light beam across the retina while the patient’s gaze remains fixed. This proof-of-concept experiment investigates whether the patient’s natural eye movements can replace mechanical scanning by guiding the gaze along predefined patterns. An infrared fundus camera setup was used with nine healthy adults (aged 20–57) who completed tasks comparing passive viewing of moving patterns to actively tracing them using a touchpad interface. The active task involved participant-controlled target movement with real-time color feedback for accurate pattern tracing. Results showed that active tracing significantly increased pupil diameter by an average of 17.8% (range 8.9–43.6%; p &lt; 0.001) and reduced blink frequency compared to passive viewing. More complex patterns led to greater pupil dilation, confirming the link between cognitive load and physiological response. These findings demonstrate that patient-driven gaze guidance can stabilize gaze, reduce blinking, and naturally dilate the pupil. These conditions might enhance the quality of scannerless OCT or other imaging techniques benefiting from guided gaze and larger pupils. There could be benefits for children and people with compliance issues, although further research is needed to consider cognitive load.</p>
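	<p>The reported dilation is a simple relative change between conditions. A sketch of the per-participant computation, with hypothetical pupil diameters:</p>
	<pre><code># Percent pupil-diameter change, active tracing vs. passive viewing.
# Diameters (mm) are hypothetical per-participant means.
passive = [3.2, 4.1, 3.8]
active  = [3.9, 4.6, 4.4]

changes = [100.0 * (a - p) / p for p, a in zip(passive, active)]
mean_change = sum(changes) / len(changes)
print([f"{c:.1f}%" for c in changes], f"mean {mean_change:.1f}%")
</code></pre>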
	]]></content:encoded>

	<dc:title>Active Gaze Guidance and Pupil Dilation Effects Through Subject Engagement in Ophthalmic Imaging</dc:title>
			<dc:creator>David Harings</dc:creator>
			<dc:creator>Niklas Bauer</dc:creator>
			<dc:creator>Damian Mendroch</dc:creator>
			<dc:creator>Uwe Oberheide</dc:creator>
			<dc:creator>Holger Lubatschowski</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050045</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-19</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-19</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/jemr18050045</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/45</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/44">

	<title>JEMR, Vol. 18, Pages 44: Processing Written Language in Video Games: An Eye-Tracking Study on Subtitled Instructions</title>
	<link>https://www.mdpi.com/1995-8692/18/5/44</link>
	<description>Written language is a common component among the multimodal representations that help players construct meanings and guide actions in video games. However, how players process texts in video games remains underexplored. To address this, the current exploratory eye-tracking study examines how players processed subtitled instructions and how this processing related to game performance. Sixty-four participants were recruited to play a video game set in a foggy desert, where they were guided by subtitled instructions to locate, corral, and contain robot agents (targets). These instructions were manipulated into three modalities: visual-only (with subtitled instructions only), auditory-only (with spoken instructions), and visual–auditory (with both subtitled and spoken instructions). The instructions were addressed to participants (as relevant subtitles) or their AI teammates (as irrelevant subtitles). Subtitle-level results of eye movements showed that participants primarily focused on the relevant subtitles, as evidenced by more fixations and higher dwell time percentages. Moreover, the word-level results indicate that participants showed lower skipping rates, more fixations, and higher dwell time percentages on words loaded with immediate action-related information, especially in the absence of audio. No significant differences were found in player performance across conditions. The findings of this study contribute to a better understanding of subtitle processing in video games and, more broadly, text processing in multimedia contexts. Implications for future research on digital literacy and computer-mediated text processing are discussed.</description>
	<pubDate>2025-09-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 44: Processing Written Language in Video Games: An Eye-Tracking Study on Subtitled Instructions</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/44">doi: 10.3390/jemr18050044</a></p>
	<p>Authors:
		Haiting Lan
		Sixin Liao
		Jan-Louis Kruger
		Michael J. Richardson
		</p>
	<p>Written language is a common component among the multimodal representations that help players construct meanings and guide actions in video games. However, how players process texts in video games remains underexplored. To address this, the current exploratory eye-tracking study examines how players processed subtitled instructions and how this processing related to game performance. Sixty-four participants were recruited to play a video game set in a foggy desert, where they were guided by subtitled instructions to locate, corral, and contain robot agents (targets). These instructions were manipulated into three modalities: visual-only (with subtitled instructions only), auditory-only (with spoken instructions), and visual–auditory (with both subtitled and spoken instructions). The instructions were addressed to participants (as relevant subtitles) or their AI teammates (as irrelevant subtitles). Subtitle-level results of eye movements showed that participants primarily focused on the relevant subtitles, as evidenced by more fixations and higher dwell time percentages. Moreover, the word-level results indicate that participants showed lower skipping rates, more fixations, and higher dwell time percentages on words loaded with immediate action-related information, especially in the absence of audio. No significant differences were found in player performance across conditions. The findings of this study contribute to a better understanding of subtitle processing in video games and, more broadly, text processing in multimedia contexts. Implications for future research on digital literacy and computer-mediated text processing are discussed.</p>
	]]></content:encoded>

	<dc:title>Processing Written Language in Video Games: An Eye-Tracking Study on Subtitled Instructions</dc:title>
			<dc:creator>Haiting Lan</dc:creator>
			<dc:creator>Sixin Liao</dc:creator>
			<dc:creator>Jan-Louis Kruger</dc:creator>
			<dc:creator>Michael J. Richardson</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050044</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-17</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-17</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/jemr18050044</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/44</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/43">

	<title>JEMR, Vol. 18, Pages 43: Entropy as a Lens: Exploring Visual Behavior Patterns in Architects</title>
	<link>https://www.mdpi.com/1995-8692/18/5/43</link>
	<description>This study examines how architectural expertise shapes visual perception, extending the “Seeing for Speaking” hypothesis into a non-linguistic domain. Specifically, it investigates whether architectural training influences unconscious visual processing of architectural content. Using eye-tracking, 48 architects and 48 laypeople freely viewed 15 still images of built, mixed, and natural environments. Visual behavior was analyzed using Shannon’s entropy scores based on dwell times within 16 × 16 grids during the first six seconds of viewing. Results revealed distinct visual attention patterns between groups. Architects showed lower entropy, indicating more focused and systematic gaze behavior, and their attention was consistently drawn to built structures. In contrast, laypeople exhibited more variable and less organized scanning patterns, with greater individual differences. Moreover, architects demonstrated higher intra-group similarity in their gaze behavior, suggesting a shared attentional schema shaped by professional training. These findings highlight that domain-specific expertise deeply influences perceptual processing, resulting in systematic and efficient attention allocation. Entropy-based metrics proved effective in capturing these differences, offering a robust tool for quantifying expert vs. non-expert visual strategies in architectural cognition. The visual patterns exhibited by architects are interpreted to reflect a “Grammar of Space”, i.e., a structured way of visually parsing spatial elements.</description>
	<pubDate>2025-09-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 43: Entropy as a Lens: Exploring Visual Behavior Patterns in Architects</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/43">doi: 10.3390/jemr18050043</a></p>
	<p>Authors:
		Renate Delucchi Danhier
		Barbara Mertins
		Holger Mertins
		Gerold Schneider
		</p>
	<p>This study examines how architectural expertise shapes visual perception, extending the “Seeing for Speaking” hypothesis into a non-linguistic domain. Specifically, it investigates whether architectural training influences unconscious visual processing of architectural content. Using eye-tracking, 48 architects and 48 laypeople freely viewed 15 still images of built, mixed, and natural environments. Visual behavior was analyzed using Shannon’s entropy scores based on dwell times within 16 × 16 grids during the first six seconds of viewing. Results revealed distinct visual attention patterns between groups. Architects showed lower entropy, indicating more focused and systematic gaze behavior, and their attention was consistently drawn to built structures. In contrast, laypeople exhibited more variable and less organized scanning patterns, with greater individual differences. Moreover, architects demonstrated higher intra-group similarity in their gaze behavior, suggesting a shared attentional schema shaped by professional training. These findings highlight that domain-specific expertise deeply influences perceptual processing, resulting in systematic and efficient attention allocation. Entropy-based metrics proved effective in capturing these differences, offering a robust tool for quantifying expert vs. non-expert visual strategies in architectural cognition. The visual patterns exhibited by architects are interpreted to reflect a “Grammar of Space”, i.e., a structured way of visually parsing spatial elements.</p>
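	<p>A minimal sketch of the entropy metric described above, assuming dwell times are binned into a 16 × 16 grid and normalized to a probability distribution before Shannon entropy is computed; the dwell map here is synthetic, not the study’s data:</p>
	<pre><code># Shannon entropy of a dwell-time distribution over a 16x16 grid.
# Lower entropy = gaze concentrated in fewer cells (more systematic viewing).
import numpy as np

rng = np.random.default_rng(2)
dwell = rng.exponential(scale=1.0, size=(16, 16))  # synthetic dwell times (s)

p = dwell / dwell.sum()                 # normalize to probabilities
p = p[p > 0]                            # ignore empty cells
entropy_bits = -(p * np.log2(p)).sum()  # maximum is log2(256) = 8 bits
print(f"gaze entropy: {entropy_bits:.2f} bits (max 8.00)")
</code></pre>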
	]]></content:encoded>

	<dc:title>Entropy as a Lens: Exploring Visual Behavior Patterns in Architects</dc:title>
			<dc:creator>Renate Delucchi Danhier</dc:creator>
			<dc:creator>Barbara Mertins</dc:creator>
			<dc:creator>Holger Mertins</dc:creator>
			<dc:creator>Gerold Schneider</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050043</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-16</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-16</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/jemr18050043</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/43</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/42">

	<title>JEMR, Vol. 18, Pages 42: How Visual Style Shapes Tourism Advertising Effectiveness: Eye-Tracking Insights into Traditional and Modern Chinese Ink Paintings</title>
	<link>https://www.mdpi.com/1995-8692/18/5/42</link>
	<description>This study investigates how traditional versus modern Chinese ink painting styles in tourism advertisements affect viewers’ visual attention, aesthetic evaluations, and tourism intentions. Using eye-tracking experiments combined with surveys and interviews, the researchers conducted a mixed-design experiment with 80 Chinese college students. Results indicate that traditional ink-style advertisements attracted longer total fixation durations, higher aesthetic evaluations, and stronger cultural resonance in natural landscape contexts, while modern ink-style advertisements captured initial attention more quickly and performed better aesthetically in urban settings. Qualitative analyses further revealed cultural familiarity and aesthetic resonance underpinning preferences for traditional style, whereas modern style mainly attracted attention through novelty and creativity. These findings expand Cultural Schema Theory and the aesthetic processing model within advertising research, suggesting practical strategies for tourism advertising to match visual styles appropriately with destination types and audience characteristics to enhance promotional effectiveness.</description>
	<pubDate>2025-09-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 42: How Visual Style Shapes Tourism Advertising Effectiveness: Eye-Tracking Insights into Traditional and Modern Chinese Ink Paintings</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/42">doi: 10.3390/jemr18050042</a></p>
	<p>Authors:
		Fulong Liu
		Xiheng Shao
		Zhengwei Tao
		Nurul Romainoor
		Mohammad Saat
		</p>
	<p>This study investigates how traditional versus modern Chinese ink painting styles in tourism advertisements affect viewers’ visual attention, aesthetic evaluations, and tourism intentions. Using eye-tracking experiments combined with surveys and interviews, the researchers conducted a mixed-design experiment with 80 Chinese college students. Results indicate that traditional ink-style advertisements attracted longer total fixation durations, higher aesthetic evaluations, and stronger cultural resonance in natural landscape contexts, while modern ink-style advertisements captured initial attention more quickly and performed better aesthetically in urban settings. Qualitative analyses further revealed cultural familiarity and aesthetic resonance underpinning preferences for traditional style, whereas modern style mainly attracted attention through novelty and creativity. These findings expand Cultural Schema Theory and the aesthetic processing model within advertising research, suggesting practical strategies for tourism advertising to match visual styles appropriately with destination types and audience characteristics to enhance promotional effectiveness.</p>
	]]></content:encoded>

	<dc:title>How Visual Style Shapes Tourism Advertising Effectiveness: Eye-Tracking Insights into Traditional and Modern Chinese Ink Paintings</dc:title>
			<dc:creator>Fulong Liu</dc:creator>
			<dc:creator>Xiheng Shao</dc:creator>
			<dc:creator>Zhengwei Tao</dc:creator>
			<dc:creator>Nurul Romainoor</dc:creator>
			<dc:creator>Mohammad Saat</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050042</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-12</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/jemr18050042</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/42</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/41">

	<title>JEMR, Vol. 18, Pages 41: Eye Movement Impairment in Women Undergoing Chemotherapy</title>
	<link>https://www.mdpi.com/1995-8692/18/5/41</link>
	<description>The assessment of visual attention is important in visual and cognitive neuroscience, providing objective measures for researchers and clinicians. This study investigated the effects of chemotherapy on eye movements in women with breast cancer. Twelve women with breast cancer and twelve healthy controls aged between 33 and 59 years completed a visual search task, identifying an Arabic number among 79 alphabetic letters. Test duration, fixation duration, total fixation duration, and total visit duration were recorded. Compared to healthy controls, women with breast cancer exhibited significantly longer mean fixation duration [t = 4.54, p &lt; 0.00]; mean total fixation duration [t = 2.41, p &lt; 0.02]; mean total visit duration [t = 2.05, p &lt; 0.05]; and total test time [t = 2.32, p &lt; 0.03]. Additionally, positive correlations were observed between the number of chemotherapy cycles and the eye-tracking parameters. These results suggest the possibility of slower information processing in women experiencing acute effects of chemotherapy. However, further studies are needed to clarify this relationship.</description>
	<pubDate>2025-09-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 41: Eye Movement Impairment in Women Undergoing Chemotherapy</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/41">doi: 10.3390/jemr18050041</a></p>
	<p>Authors:
		Milena Edite Casé de Oliveira
		José Marcos Nascimento de Sousa
		Gerlane Da Silva Vieira Torres
		Ruanna Priscila Silva de Brito
		Nathalia dos Santos Negreiros
		Bianca da Nóbrega Tomaz Trombetta
		Kedma Anne Lima Gomes Alexandrino
		Waleska Fernanda Souto Nóbrega
		Letícia Lorena Soares Silva Polimeni
		Catarina Cavalcanti Braga
		Cristiane Maria Silva de Souza Lima
		Thiago P. Fernandes
		Natanael Antonio dos Santos
		</p>
	<p>The assessment of visual attention is important in visual and cognitive neuroscience, providing objective measures for researchers and clinicians. This study investigated the effects of chemotherapy on eye movements in women with breast cancer. Twelve women with breast cancer and twelve healthy controls aged between 33 and 59 years completed a visual search task, identifying an Arabic number among 79 alphabetic letters. Test duration, fixation duration, total fixation duration, and total visit duration were recorded. Compared to healthy controls, women with breast cancer exhibited significantly longer mean fixation duration [t = 4.54, p &lt; 0.00]; mean total fixation duration [t = 2.41, p &lt; 0.02]; mean total visit duration [t = 2.05, p &lt; 0.05]; and total test time [t = 2.32, p &lt; 0.03]. Additionally, positive correlations were observed between the number of chemotherapy cycles and the eye-tracking parameters. These results suggest the possibility of slower information processing in women experiencing acute effects of chemotherapy. However, further studies are needed to clarify this relationship.</p>
	]]></content:encoded>

	<dc:title>Eye Movement Impairment in Women Undergoing Chemotherapy</dc:title>
			<dc:creator>Milena Edite Casé de Oliveira</dc:creator>
			<dc:creator>José Marcos Nascimento de Sousa</dc:creator>
			<dc:creator>Gerlane Da Silva Vieira Torres</dc:creator>
			<dc:creator>Ruanna Priscila Silva de Brito</dc:creator>
			<dc:creator>Nathalia dos Santos Negreiros</dc:creator>
			<dc:creator>Bianca da Nóbrega Tomaz Trombetta</dc:creator>
			<dc:creator>Kedma Anne Lima Gomes Alexandrino</dc:creator>
			<dc:creator>Waleska Fernanda Souto Nóbrega</dc:creator>
			<dc:creator>Letícia Lorena Soares Silva Polimeni</dc:creator>
			<dc:creator>Catarina Cavalcanti Braga</dc:creator>
			<dc:creator>Cristiane Maria Silva de Souza Lima</dc:creator>
			<dc:creator>Thiago P. Fernandes</dc:creator>
			<dc:creator>Natanael Antonio dos Santos</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050041</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-11</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-11</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/jemr18050041</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/41</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/40">

	<title>JEMR, Vol. 18, Pages 40: Interpretable Quantification of Scene-Induced Driver Visual Load: Linking Eye-Tracking Behavior to Road Scene Features via SHAP Analysis</title>
	<link>https://www.mdpi.com/1995-8692/18/5/40</link>
	<description>Road traffic accidents remain a major global public health concern, and complex urban driving environments significantly elevate drivers’ visual load and accident risks. Unlike existing research that adopts a macro perspective by considering multiple factors such as the driver, vehicle, and road, this study focuses on the driver’s visual load, a key safety factor, and its direct source: the driver’s visual environment. We have developed an interpretable framework combining computer vision and machine learning to quantify how road scene features influence oculomotor behavior and scene-induced visual load, establishing a complete and interpretable link between scene features, eye movement behavior, and visual load. Using the DR(eye)VE dataset, visual attention demand is established through occlusion experiments and confirmed to correlate with eye-tracking metrics. K-means clustering is applied to classify visual load levels based on discriminative oculomotor features, while semantic segmentation extracts quantifiable road scene features such as the Green Visibility Index, Sky Visibility Index, and Street Canyon Enclosure. Among multiple machine learning models (Random Forest, AdaBoost, XGBoost, and SVM), XGBoost demonstrates optimal performance in visual load detection. SHAP analysis reveals critical thresholds: the probability of high visual load increases when pole density exceeds 0.08%, signage surpasses 0.55%, or buildings account for more than 14%, while blink duration/rate decrease when street enclosure exceeds 38% or road congestion goes beyond 25%, indicating elevated visual load. The proposed framework provides actionable insights for urban design and driver assistance systems, advancing traffic safety through data-driven optimization of road environments.</description>
	<pubDate>2025-09-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 40: Interpretable Quantification of Scene-Induced Driver Visual Load: Linking Eye-Tracking Behavior to Road Scene Features via SHAP Analysis</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/40">doi: 10.3390/jemr18050040</a></p>
	<p>Authors:
		Jie Ni
		Yifu Shao
		Yiwen Guo
		Yongqi Gu
		</p>
	<p>Road traffic accidents remain a major global public health concern, and complex urban driving environments significantly elevate drivers’ visual load and accident risks. Unlike existing research that adopts a macro perspective by considering multiple factors such as the driver, vehicle, and road, this study focuses on the driver’s visual load, a key safety factor, and its direct source: the driver’s visual environment. We have developed an interpretable framework combining computer vision and machine learning to quantify how road scene features influence oculomotor behavior and scene-induced visual load, establishing a complete and interpretable link between scene features, eye movement behavior, and visual load. Using the DR(eye)VE dataset, visual attention demand is established through occlusion experiments and confirmed to correlate with eye-tracking metrics. K-means clustering is applied to classify visual load levels based on discriminative oculomotor features, while semantic segmentation extracts quantifiable road scene features such as the Green Visibility Index, Sky Visibility Index, and Street Canyon Enclosure. Among multiple machine learning models (Random Forest, AdaBoost, XGBoost, and SVM), XGBoost demonstrates optimal performance in visual load detection. SHAP analysis reveals critical thresholds: the probability of high visual load increases when pole density exceeds 0.08%, signage surpasses 0.55%, or buildings account for more than 14%, while blink duration/rate decrease when street enclosure exceeds 38% or road congestion goes beyond 25%, indicating elevated visual load. The proposed framework provides actionable insights for urban design and driver assistance systems, advancing traffic safety through data-driven optimization of road environments.</p>
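	<p>As an illustration of the modelling pipeline described above (gradient-boosted classification followed by SHAP attribution), the sketch below uses synthetic scene features and a toy labelling rule loosely echoing the reported thresholds. It assumes the xgboost and shap Python packages and is not the authors’ implementation.</p>
	<pre><code># XGBoost + SHAP sketch: classify high vs. low visual load from scene
# features, then attribute predictions to features. Synthetic data only.
import numpy as np
import xgboost as xgb
import shap

rng = np.random.default_rng(3)
n = 500
X = np.column_stack([
    rng.uniform(0, 0.2, n),   # pole density (%), hypothetical range
    rng.uniform(0, 1.0, n),   # signage (%), hypothetical range
    rng.uniform(0, 30, n),    # building proportion (%), hypothetical range
])
# Toy rule loosely echoing the thresholds reported in the abstract.
y = ((X[:, 0] > 0.08) | (X[:, 1] > 0.55) | (X[:, 2] > 14)).astype(int)

model = xgb.XGBClassifier(n_estimators=50, max_depth=3).fit(X, y)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
print("mean |SHAP| per feature:", np.abs(shap_values).mean(axis=0))
</code></pre>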
	]]></content:encoded>

	<dc:title>Interpretable Quantification of Scene-Induced Driver Visual Load: Linking Eye-Tracking Behavior to Road Scene Features via SHAP Analysis</dc:title>
			<dc:creator>Jie Ni</dc:creator>
			<dc:creator>Yifu Shao</dc:creator>
			<dc:creator>Yiwen Guo</dc:creator>
			<dc:creator>Yongqi Gu</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050040</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-09</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-09</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/jemr18050040</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/40</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/39">

	<title>JEMR, Vol. 18, Pages 39: A Review of Digital Eye Strain: Binocular Vision Anomalies, Ocular Surface Changes, and the Need for Objective Assessment</title>
	<link>https://www.mdpi.com/1995-8692/18/5/39</link>
	<description>(1) Background: This study investigates the impact of digital device usage on the visual system, with a focus on binocular vision. It also highlights the importance of objective assessment in accurately diagnosing and guiding therapeutic approaches for Digital Eye Strain Syndrome (DESS). (2) Methods: A comprehensive narrative review was conducted to synthesize existing evidence. The methodological quality of observational and case–control studies was assessed using the Newcastle–Ottawa scale, while randomized controlled trials (RCTs) were evaluated using the Cochrane risk-of-bias (RoB 2) tool. (3) Results: Fifteen articles were included in this review, with a predominant focus on binocular vision anomalies, particularly accommodative and vergence dysfunctions, as well as ocular surface anomalies related to DESS. Clinical assessments relied primarily on symptom-based questionnaires, which represent a significant limitation. The included studies were largely observational, with a lack of longitudinal studies and RCTs. In contrast, research in dry eye disease has been more comprehensive, with multiple RCTs already conducted. (4) Conclusions: It is therefore essential to develop validated objective metrics that support accurate clinical diagnosis and guide evidence-based interventions. It remains unclear whether changes in binocular vision are a cause or a consequence of DESS. However, prolonged screen time can exacerbate pre-existing binocular vision anomalies due to continuous strain on convergence and accommodation, leading to symptoms. Future research should prioritize prospective longitudinal studies and well-designed RCTs that integrate objective clinical measures to elucidate causal relationships and improve diagnostic and therapeutic frameworks.</description>
	<pubDate>2025-09-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 39: A Review of Digital Eye Strain: Binocular Vision Anomalies, Ocular Surface Changes, and the Need for Objective Assessment</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/39">doi: 10.3390/jemr18050039</a></p>
	<p>Authors:
		Maria João Barata
		Pedro Aguiar
		Andrzej Grzybowski
		André Moreira-Rosário
		Carla Lança
		</p>
	<p>(1) Background: This study investigates the impact of digital device usage on the visual system, with a focus on binocular vision. It also highlights the importance of objective assessment in accurately diagnosing and guiding therapeutic approaches for Digital Eye Strain Syndrome (DESS). (2) Methods: A comprehensive narrative review was conducted to synthesize existing evidence. The methodological quality of observational and case–control studies was assessed using the Newcastle–Ottawa scale, while randomized controlled trials (RCTs) were evaluated using the Cochrane risk-of-bias (RoB 2) tool. (3) Results: Fifteen articles were included in this review, with a predominant focus on binocular vision anomalies, particularly accommodative and vergence dysfunctions, as well as ocular surface anomalies related to DESS. Clinical assessments relied primarily on symptom-based questionnaires, which represent a significant limitation. The included studies were largely observational, with a lack of longitudinal studies and RCTs. In contrast, research in dry eye disease has been more comprehensive, with multiple RCTs already conducted. (4) Conclusions: It is therefore essential to develop validated objective metrics that support accurate clinical diagnosis and guide evidence-based interventions. It remains unclear whether changes in binocular vision are a cause or a consequence of DESS. However, prolonged screen time can exacerbate pre-existing binocular vision anomalies due to continuous strain on convergence and accommodation, leading to symptoms. Future research should prioritize prospective longitudinal studies and well-designed RCTs that integrate objective clinical measures to elucidate causal relationships and improve diagnostic and therapeutic frameworks.</p>
	]]></content:encoded>

	<dc:title>A Review of Digital Eye Strain: Binocular Vision Anomalies, Ocular Surface Changes, and the Need for Objective Assessment</dc:title>
			<dc:creator>Maria João Barata</dc:creator>
			<dc:creator>Pedro Aguiar</dc:creator>
			<dc:creator>Andrzej Grzybowski</dc:creator>
			<dc:creator>André Moreira-Rosário</dc:creator>
			<dc:creator>Carla Lança</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050039</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-09-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-09-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/jemr18050039</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/39</prism:url>
	
	<cc:license rdf:resource="CC BY 4.0"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/38">

	<title>JEMR, Vol. 18, Pages 38: Reading Assessment and Eye Movement Analysis in Bilateral Central Scotoma Due to Age-Related Macular Degeneration</title>
	<link>https://www.mdpi.com/1995-8692/18/5/38</link>
	<description>This study investigates reading performance and eye movements in individuals with eccentric fixation due to age-related macular degeneration (AMD). Overall, 17 individuals with bilateral AMD (7 males; mean age 77.47 ± 5.96 years) and 17 controls (10 males; mean age 72.18 ± 5.98 years) were assessed for reading visual acuity (VA), reading speed (Minnesota low vision reading chart in Slovene, MNREAD-SI), and near contrast sensitivity (Pelli-Robson). Microperimetry (NIDEK MP-3) was used to evaluate preferential retinal locus (PRL) location and fixation stability. Eye movements were recorded with Tobii Pro Glasses 2 and analyzed for reading duration, saccade amplitude, peak velocity, number of saccades, saccade duration, and fixation duration. Individuals with AMD exhibited significantly reduced reading indices (worse reading VA (p &lt; 0.001), slower reading (p &lt; 0.001), and lower near contrast sensitivity (p &lt; 0.001)). Eye movement analysis revealed prolonged reading duration, longer fixation duration, and an increased number of saccades per paragraph in individuals with AMD. The number of saccades per paragraph was significantly correlated with all measured reading indices. These findings provide insights into reading adaptations in AMD. Simultaneously, the proposed approach to analyzing eye movements puts forward eye trackers as a prospective diagnostic tool in ophthalmology.</description>
	<pubDate>2025-08-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 38: Reading Assessment and Eye Movement Analysis in Bilateral Central Scotoma Due to Age-Related Macular Degeneration</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/38">doi: 10.3390/jemr18050038</a></p>
	<p>Authors:
		Polona Benda
		Grega Jakus
		Jaka Sodnik
		Nadica Miljković
		Ilija Tanasković
		Smilja Stokanović
		Andrej Meglič
		Nataša Valentinčič
		Polona Mekjavić
		</p>
	<p>This study investigates reading performance and eye movements in individuals with eccentric fixation due to age-related macular degeneration (AMD). Overall, 17 individuals with bilateral AMD (7 males; mean age 77.47 ± 5.96 years) and 17 controls (10 males; mean age 72.18 ± 5.98 years) were assessed for reading visual acuity (VA), reading speed (Minnesota low vision reading chart in Slovene, MNREAD-SI), and near contrast sensitivity (Pelli-Robson). Microperimetry (NIDEK MP-3) was used to evaluate preferential retinal locus (PRL) location and fixation stability. Eye movements were recorded with Tobii Pro Glasses 2 and analyzed for reading duration, saccade amplitude, peak velocity, number of saccades, saccade duration, and fixation duration. Individuals with AMD exhibited significantly reduced reading indices (worse reading VA (p &lt; 0.001), slower reading (p &lt; 0.001), and lower near contrast sensitivity (p &lt; 0.001)). Eye movement analysis revealed prolonged reading duration, longer fixation duration, and an increased number of saccades per paragraph in individuals with AMD. The number of saccades per paragraph was significantly correlated with all measured reading indices. These findings provide insights into reading adaptations in AMD. At the same time, the proposed approach to analyzing eye movements puts forward eye trackers as a prospective diagnostic tool in ophthalmology.</p>
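	<p>Saccade counts of the kind correlated above are commonly obtained with velocity-threshold (I-VT) event detection. The sketch below is an illustrative stand-in, not the study's actual pipeline: the gaze trace, sampling rate, and 30 deg/s threshold are all assumptions.</p>
	<pre><code>import numpy as np

def count_saccades(x_deg, y_deg, fs_hz, vel_thresh=30.0):
    """Count saccades with a simple velocity threshold (I-VT): samples
    whose angular speed exceeds vel_thresh deg/s are saccadic, and each
    contiguous run of such samples counts as one saccade."""
    vx = np.gradient(x_deg) * fs_hz          # deg/s, horizontal
    vy = np.gradient(y_deg) * fs_hz          # deg/s, vertical
    speed = np.hypot(vx, vy)
    fast = np.greater(speed, vel_thresh).astype(int)
    # Rising edges of the "fast" mask mark saccade onsets.
    onsets = np.flatnonzero(np.diff(np.concatenate(([0], fast))) == 1)
    return onsets.size

# Illustrative trace at 100 Hz: two abrupt 5-degree gaze shifts.
fs = 100
x = np.zeros(3 * fs)
x[100:] += 5.0
x[200:] += 5.0
print(count_saccades(x, np.zeros(x.size), fs))  # prints 2
</code></pre>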
	]]></content:encoded>

	<dc:title>Reading Assessment and Eye Movement Analysis in Bilateral Central Scotoma Due to Age-Related Macular Degeneration</dc:title>
			<dc:creator>Polona Benda</dc:creator>
			<dc:creator>Grega Jakus</dc:creator>
			<dc:creator>Jaka Sodnik</dc:creator>
			<dc:creator>Nadica Miljković</dc:creator>
			<dc:creator>Ilija Tanasković</dc:creator>
			<dc:creator>Smilja Stokanović</dc:creator>
			<dc:creator>Andrej Meglič</dc:creator>
			<dc:creator>Nataša Valentinčič</dc:creator>
			<dc:creator>Polona Mekjavić</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050038</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-30</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-30</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/jemr18050038</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/5/37">

	<title>JEMR, Vol. 18, Pages 37: Spatial Guidance Overrides Dynamic Saliency in VR: An Eye-Tracking Study on Gestalt Grouping Mechanisms and Visual Attention Patterns</title>
	<link>https://www.mdpi.com/1995-8692/18/5/37</link>
	<description>(1) Background: Virtual Reality (VR) films challenge traditional visual cognition by offering novel perceptual experiences. This study investigates the applicability of Gestalt grouping principles in dynamic VR scenes, the influence of VR environments on grouping efficiency, and the relationship between viewer experience and grouping effects. (2) Methods: Eye-tracking experiments were conducted with 42 participants using the HTC Vive Pro Eye and Tobii Pro Lab. Participants watched a non-narrative VR film with fixed camera positions to eliminate narrative and auditory confounds. Eye-tracking metrics were analyzed using SPSS version 29.0.1, and data were visualized through heat maps and gaze trajectory plots. (3) Results: Viewers tended to focus on spatial nodes and continuous structures. Initial fixations were anchored near the body but shifted rapidly thereafter. Heat maps revealed a consistent concentration of fixations on the dock area. (4) Conclusions: VR reshapes visual organization, where proximity, continuity, and closure outweigh traditional saliency. Dynamic elements draw attention only when linked to user goals. Designers should prioritize spatial logic, using functional nodes as cognitive anchors and continuous paths as embodied guides. Future work should test these mechanisms in narrative VR and explore neural correlates via fNIRS or EEG.</description>
	<pubDate>2025-08-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 37: Spatial Guidance Overrides Dynamic Saliency in VR: An Eye-Tracking Study on Gestalt Grouping Mechanisms and Visual Attention Patterns</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/5/37">doi: 10.3390/jemr18050037</a></p>
	<p>Authors:
		Qiaoling Zou
		Wanyu Zheng
		Xinyan Jiang
		Dongning Li
		</p>
	<p>(1) Background: Virtual Reality (VR) films challenge traditional visual cognition by offering novel perceptual experiences. This study investigates the applicability of Gestalt grouping principles in dynamic VR scenes, the influence of VR environments on grouping efficiency, and the relationship between viewer experience and grouping effects. (2) Methods: Eye-tracking experiments were conducted with 42 participants using the HTC Vive Pro Eye and Tobii Pro Lab. Participants watched a non-narrative VR film with fixed camera positions to eliminate narrative and auditory confounds. Eye-tracking metrics were analyzed using SPSS version 29.0.1, and data were visualized through heat maps and gaze trajectory plots. (3) Results: Viewers tended to focus on spatial nodes and continuous structures. Initial fixations were anchored near the body but shifted rapidly thereafter. Heat maps revealed a consistent concentration of fixations on the dock area. (4) Conclusions: VR reshapes visual organization, where proximity, continuity, and closure outweigh traditional saliency. Dynamic elements draw attention only when linked to user goals. Designers should prioritize spatial logic, using functional nodes as cognitive anchors and continuous paths as embodied guides. Future work should test these mechanisms in narrative VR and explore neural correlates via fNIRS or EEG.</p>
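	<p>Fixation heat maps like those described above are typically built by binning fixation coordinates into a grid. A minimal sketch under assumed inputs (the synthetic coordinates, the normalized frame, and the 20-by-20 grid are illustrative, not the study's data):</p>
	<pre><code>import numpy as np

rng = np.random.default_rng(1)

# Synthetic normalized fixation coordinates: most cluster around a
# hypothetical region of interest, the rest scatter across the frame.
fix_x = np.concatenate((rng.normal(0.6, 0.05, 300), rng.uniform(0, 1, 100)))
fix_y = np.concatenate((rng.normal(0.4, 0.05, 300), rng.uniform(0, 1, 100)))

# Bin fixations into a coarse grid; each cell's count is its "heat".
heat, _, _ = np.histogram2d(fix_x, fix_y, bins=20, range=((0, 1), (0, 1)))
peak = np.unravel_index(np.argmax(heat), heat.shape)
print("hottest cell:", peak, "with", int(heat[peak]), "fixations")
</code></pre>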
	]]></content:encoded>

	<dc:title>Spatial Guidance Overrides Dynamic Saliency in VR: An Eye-Tracking Study on Gestalt Grouping Mechanisms and Visual Attention Patterns</dc:title>
			<dc:creator>Qiaoling Zou</dc:creator>
			<dc:creator>Wanyu Zheng</dc:creator>
			<dc:creator>Xinyan Jiang</dc:creator>
			<dc:creator>Dongning Li</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18050037</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-25</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-25</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>5</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/jemr18050037</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/5/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/4/36">

	<title>JEMR, Vol. 18, Pages 36: Multimodal Assessment of Therapeutic Alliance: A Study Using Wearable Technology</title>
	<link>https://www.mdpi.com/1995-8692/18/4/36</link>
	<description>This empirical pilot study explored the use of wearable eye-tracking technology to gain objective insights into interpersonal interactions, particularly in healthcare provider training. Traditional methods of understanding these interactions rely on subjective observations, but wearable technology offers a more precise, multimodal approach. This multidisciplinary study integrated counseling perspectives on therapeutic alliance with an empirically motivated wearable framework informed by prior research in clinical psychology. The aims of the study were to describe the complex data that can be acquired with wearable technology and to test our primary hypothesis that the therapeutic alliance in clinical training interactions is associated with behaviors consistent with stronger interpersonal engagement. One key finding was that a single multimodal feature predicted discrepancies between client and therapist working alliance ratings (b = −4.29, 95% CI [−8.12, −0.38]), suggesting clients may have perceived highly structured interactions as less personal than therapists did. Multimodal features were more strongly associated with therapist-rated working alliance, whereas linguistic analysis better captured client-rated working alliance. These preliminary findings support the utility of multimodal approaches for capturing clinical interactions. This technology provides valuable context for developing actionable insights without burdening instructors or learners. Findings from this study will motivate data-driven methods for providing actionable feedback to clinical trainees.</description>
	<pubDate>2025-08-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 36: Multimodal Assessment of Therapeutic Alliance: A Study Using Wearable Technology</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/4/36">doi: 10.3390/jemr18040036</a></p>
	<p>Authors:
		Mikael Rubin
		Robert Hickson
		Caitlyn Suen
		Shreya Vaishnav
		</p>
	<p>This empirical pilot study explored the use of wearable eye-tracking technology to gain objective insights into interpersonal interactions, particularly in healthcare provider training. Traditional methods of understanding these interactions rely on subjective observations, but wearable technology offers a more precise, multimodal approach. This multidisciplinary study integrated counseling perspectives on therapeutic alliance with an empirically motivated wearable framework informed by prior research in clinical psychology. The aims of the study were to describe the complex data that can be acquired with wearable technology and to test our primary hypothesis that the therapeutic alliance in clinical training interactions is associated with behaviors consistent with stronger interpersonal engagement. One key finding was that a single multimodal feature predicted discrepancies between client and therapist working alliance ratings (b = −4.29, 95% CI [−8.12, −0.38]), suggesting clients may have perceived highly structured interactions as less personal than therapists did. Multimodal features were more strongly associated with therapist-rated working alliance, whereas linguistic analysis better captured client-rated working alliance. These preliminary findings support the utility of multimodal approaches for capturing clinical interactions. This technology provides valuable context for developing actionable insights without burdening instructors or learners. Findings from this study will motivate data-driven methods for providing actionable feedback to clinical trainees.</p>
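	<p>A coefficient reported with a 95% CI, as above, is what an ordinary least squares fit provides. The sketch below uses synthetic stand-ins; the variable names, data, and effect size are assumptions for illustration only:</p>
	<pre><code>import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(2)

# Synthetic stand-ins: one multimodal feature per session and the
# client-minus-therapist discrepancy in working-alliance ratings.
feature = rng.normal(size=60)
discrepancy = -4.0 * feature + rng.normal(scale=3.0, size=60)

X = sm.add_constant(feature)          # intercept + predictor
fit = sm.OLS(discrepancy, X).fit()
b = fit.params[1]                     # slope, analogous to the reported b
lo, hi = fit.conf_int(alpha=0.05)[1]  # 95% CI for the slope
print(f"b = {b:.2f}, 95% CI [{lo:.2f}, {hi:.2f}]")
</code></pre>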
	]]></content:encoded>

	<dc:title>Multimodal Assessment of Therapeutic Alliance: A Study Using Wearable Technology</dc:title>
			<dc:creator>Mikael Rubin</dc:creator>
			<dc:creator>Robert Hickson</dc:creator>
			<dc:creator>Caitlyn Suen</dc:creator>
			<dc:creator>Shreya Vaishnav</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18040036</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-12</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-12</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/jemr18040036</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/4/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/4/35">

	<title>JEMR, Vol. 18, Pages 35: Predicting Cartographic Symbol Location with Eye-Tracking Data and Machine Learning Approach</title>
	<link>https://www.mdpi.com/1995-8692/18/4/35</link>
	<description>Visual search is a core component of map reading, influenced by both cartographic design and human perceptual processes. This study investigates whether the location of a target cartographic symbol (central or peripheral) can be predicted using eye-tracking data and machine learning techniques. Two datasets were analyzed, each derived from a separate study involving visual search tasks with varying map characteristics. A comprehensive set of eye movement features, including fixation duration, saccade amplitude, and gaze dispersion, was extracted and standardized. Feature selection and polynomial interaction terms were applied to enhance model performance. Twelve supervised classification algorithms were tested, including Random Forest, Gradient Boosting, and Support Vector Machines. The models were evaluated using accuracy, precision, recall, F1-score, and ROC-AUC. Results show that models trained on the first dataset achieved higher accuracy and class separation, with AdaBoost and Gradient Boosting performing best (accuracy = 0.822; ROC-AUC &gt; 0.86). In contrast, the second dataset presented greater classification challenges, despite high recall in some models. Feature importance analysis revealed that the standard deviation of fixation positions, a proxy for gaze dispersion, particularly along the vertical axis, was the most predictive metric. These findings suggest that gaze behavior can reliably indicate the spatial focus of visual search, providing valuable insight for the development of adaptive, gaze-aware cartographic interfaces.</description>
	<pubDate>2025-08-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 35: Predicting Cartographic Symbol Location with Eye-Tracking Data and Machine Learning Approach</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/4/35">doi: 10.3390/jemr18040035</a></p>
	<p>Authors:
		Paweł Cybulski
		</p>
	<p>Visual search is a core component of map reading, influenced by both cartographic design and human perceptual processes. This study investigates whether the location of a target cartographic symbol (central or peripheral) can be predicted using eye-tracking data and machine learning techniques. Two datasets were analyzed, each derived from a separate study involving visual search tasks with varying map characteristics. A comprehensive set of eye movement features, including fixation duration, saccade amplitude, and gaze dispersion, was extracted and standardized. Feature selection and polynomial interaction terms were applied to enhance model performance. Twelve supervised classification algorithms were tested, including Random Forest, Gradient Boosting, and Support Vector Machines. The models were evaluated using accuracy, precision, recall, F1-score, and ROC-AUC. Results show that models trained on the first dataset achieved higher accuracy and class separation, with AdaBoost and Gradient Boosting performing best (accuracy = 0.822; ROC-AUC &gt; 0.86). In contrast, the second dataset presented greater classification challenges, despite high recall in some models. Feature importance analysis revealed that the standard deviation of fixation positions, a proxy for gaze dispersion, particularly along the vertical axis, was the most predictive metric. These findings suggest that gaze behavior can reliably indicate the spatial focus of visual search, providing valuable insight for the development of adaptive, gaze-aware cartographic interfaces.</p>
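	<p>As a rough illustration of the pipeline described above, the sketch below standardizes features, adds pairwise interaction terms, and scores three of the named classifiers with accuracy and ROC-AUC. The synthetic features and labels are assumptions standing in for the study's datasets:</p>
	<pre><code>import numpy as np
from sklearn.ensemble import (AdaBoostClassifier, GradientBoostingClassifier,
                              RandomForestClassifier)
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler

rng = np.random.default_rng(0)

# Synthetic stand-in for per-trial gaze features: fixation duration,
# saccade amplitude, and horizontal/vertical fixation dispersion.
n = 400
X = rng.normal(size=(n, 4))
# Hypothetical link: vertical dispersion (column 3) drives the label,
# echoing the finding that it was the most predictive metric.
y = np.greater(X[:, 3] + 0.5 * rng.normal(size=n), 0).astype(int)

X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

models = {
    "random_forest": RandomForestClassifier(random_state=0),
    "gradient_boosting": GradientBoostingClassifier(random_state=0),
    "adaboost": AdaBoostClassifier(random_state=0),
}
for name, clf in models.items():
    # Standardize, add pairwise interaction terms, then fit the classifier.
    pipe = make_pipeline(
        StandardScaler(),
        PolynomialFeatures(degree=2, interaction_only=True, include_bias=False),
        clf,
    )
    pipe.fit(X_tr, y_tr)
    proba = pipe.predict_proba(X_te)[:, 1]
    acc = accuracy_score(y_te, pipe.predict(X_te))
    auc = roc_auc_score(y_te, proba)
    print(name, round(acc, 3), round(auc, 3))
</code></pre>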
	]]></content:encoded>

	<dc:title>Predicting Cartographic Symbol Location with Eye-Tracking Data and Machine Learning Approach</dc:title>
			<dc:creator>Paweł Cybulski</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18040035</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-07</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-07</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/jemr18040035</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/4/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/4/34">

	<title>JEMR, Vol. 18, Pages 34: Digital Eye Strain Monitoring for One-Hour Smartphone Engagement Through Eye Activity Measurement System</title>
	<link>https://www.mdpi.com/1995-8692/18/4/34</link>
	<description>Smartphones have revolutionized our daily lives, becoming portable pocket computers with easy internet access. India, which has the second-highest number of smartphone and internet users worldwide, experienced a significant rise in smartphone usage between 2013 and 2024. Prolonged smartphone use, exceeding 20 min at a time, can lead to physical and mental health issues, including psychophysiological disorders. Extended exposure to digital devices and the blue light they emit causes digital eye strain, sleep disorders, and other vision-related problems. This research examines the impact of 1 h of smartphone usage on visual fatigue among young Indian adults. To address this, a portable, low-cost visual activity measurement system was developed that measures blink rate, inter-blink interval, and pupil diameter. Eye activity was recorded during 1 h of smartphone use across three tasks: e-book reading, video watching, and social media reels (short videos). Social media reels showed the greatest screen variation, with continuous changes in brightness and intensity affecting pupil dilation and reducing blink rate. This reduction in blink rate, together with increases in inter-blink interval and pupil dilation, could lead to visual fatigue.</description>
	<pubDate>2025-08-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 34: Digital Eye Strain Monitoring for One-Hour Smartphone Engagement Through Eye Activity Measurement System</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/4/34">doi: 10.3390/jemr18040034</a></p>
	<p>Authors:
		Bhanu Dandumahanti
		Prithvi Chittoor
		Murali Subramaniyam
		</p>
	<p>Smartphones have revolutionized our daily lives, becoming portable pocket computers with easy internet access. India, which has the second-highest number of smartphone and internet users worldwide, experienced a significant rise in smartphone usage between 2013 and 2024. Prolonged smartphone use, exceeding 20 min at a time, can lead to physical and mental health issues, including psychophysiological disorders. Extended exposure to digital devices and the blue light they emit causes digital eye strain, sleep disorders, and other vision-related problems. This research examines the impact of 1 h of smartphone usage on visual fatigue among young Indian adults. To address this, a portable, low-cost visual activity measurement system was developed that measures blink rate, inter-blink interval, and pupil diameter. Eye activity was recorded during 1 h of smartphone use across three tasks: e-book reading, video watching, and social media reels (short videos). Social media reels showed the greatest screen variation, with continuous changes in brightness and intensity affecting pupil dilation and reducing blink rate. This reduction in blink rate, together with increases in inter-blink interval and pupil dilation, could lead to visual fatigue.</p>
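	<p>Blink rate and inter-blink interval of the kind measured above can be derived from a pupil-diameter stream in which blinks appear as data dropouts. The sketch below makes that assumption explicit; the sampling rate and trace are illustrative, not the developed system:</p>
	<pre><code>import numpy as np

def blink_metrics(pupil_mm, fs_hz):
    """Blink rate (blinks/min) and mean inter-blink interval (s) from a
    pupil-diameter trace in which blinks appear as NaN dropouts."""
    missing = np.isnan(pupil_mm).astype(int)
    # Rising edges of the missing-data mask mark blink onsets.
    onsets = np.flatnonzero(np.diff(np.concatenate(([0], missing))) == 1)
    duration_s = pupil_mm.size / fs_hz
    blink_rate = 60.0 * onsets.size / duration_s
    ibi_s = np.diff(onsets) / fs_hz  # gaps between successive onsets
    mean_ibi = float(np.mean(ibi_s)) if ibi_s.size else float("nan")
    return blink_rate, mean_ibi

# Illustrative 60 s trace at 120 Hz with three synthetic blinks.
fs = 120
pupil = np.full(60 * fs, 4.2)
for start in (10 * fs, 30 * fs, 50 * fs):
    pupil[start:start + 24] = np.nan  # ~200 ms dropout per blink
print(blink_metrics(pupil, fs))  # about (3.0, 20.0)
</code></pre>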
	]]></content:encoded>

	<dc:title>Digital Eye Strain Monitoring for One-Hour Smartphone Engagement Through Eye Activity Measurement System</dc:title>
			<dc:creator>Bhanu Dandumahanti</dc:creator>
			<dc:creator>Prithvi Chittoor</dc:creator>
			<dc:creator>Murali Subramaniyam</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18040034</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-05</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-05</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/jemr18040034</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/4/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/1995-8692/18/4/33">

	<title>JEMR, Vol. 18, Pages 33: Visual Word Segmentation Cues in Tibetan Reading: Comparing Dictionary-Based and Psychological Word Segmentation</title>
	<link>https://www.mdpi.com/1995-8692/18/4/33</link>
	<description>This study utilized eye-tracking technology to explore the role of visual word segmentation cues in Tibetan reading, with a particular focus on the effects of dictionary-based and psychological word segmentation on reading and lexical recognition. The experiment employed a 2 × 3 design, comparing six conditions: normal sentences, dictionary word segmentation (spaces), psychological word segmentation (spaces), normal sentences (green), dictionary word segmentation (color alternation), and psychological word segmentation (color alternation). The results revealed that word segmentation with spaces (whether dictionary-based or psychological) significantly improved reading efficiency and lexical recognition, whereas color alternation showed no substantial facilitative effect. Psychological and dictionary word segmentation performed similarly across most metrics, though psychological segmentation was slightly better on some indicators (e.g., sentence reading time and number of fixations) and dictionary segmentation on others (e.g., average saccade amplitude and number of regressions). The study further suggests that Tibetan reading may involve cognitive processing at different levels, and that the basic processing units may differ across these levels. These findings hold significant implications for understanding the cognitive processes involved in Tibetan reading and for optimizing the presentation of Tibetan text.</description>
	<pubDate>2025-08-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>JEMR, Vol. 18, Pages 33: Visual Word Segmentation Cues in Tibetan Reading: Comparing Dictionary-Based and Psychological Word Segmentation</b></p>
	<p>Journal of Eye Movement Research <a href="https://www.mdpi.com/1995-8692/18/4/33">doi: 10.3390/jemr18040033</a></p>
	<p>Authors:
		Dingyi Niu
		Zijian Xie
		Jiaqi Liu
		Chen Wang
		Ze Zhang
		</p>
	<p>This study utilized eye-tracking technology to explore the role of visual word segmentation cues in Tibetan reading, with a particular focus on the effects of dictionary-based and psychological word segmentation on reading and lexical recognition. The experiment employed a 2 × 3 design, comparing six conditions: normal sentences, dictionary word segmentation (spaces), psychological word segmentation (spaces), normal sentences (green), dictionary word segmentation (color alternation), and psychological word segmentation (color alternation). The results revealed that word segmentation with spaces (whether dictionary-based or psychological) significantly improved reading efficiency and lexical recognition, whereas color alternation showed no substantial facilitative effect. Psychological and dictionary word segmentation performed similarly across most metrics, though psychological segmentation was slightly better on some indicators (e.g., sentence reading time and number of fixations) and dictionary segmentation on others (e.g., average saccade amplitude and number of regressions). The study further suggests that Tibetan reading may involve cognitive processing at different levels, and that the basic processing units may differ across these levels. These findings hold significant implications for understanding the cognitive processes involved in Tibetan reading and for optimizing the presentation of Tibetan text.</p>
	]]></content:encoded>

	<dc:title>Visual Word Segmentation Cues in Tibetan Reading: Comparing Dictionary-Based and Psychological Word Segmentation</dc:title>
			<dc:creator>Dingyi Niu</dc:creator>
			<dc:creator>Zijian Xie</dc:creator>
			<dc:creator>Jiaqi Liu</dc:creator>
			<dc:creator>Chen Wang</dc:creator>
			<dc:creator>Ze Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/jemr18040033</dc:identifier>
	<dc:source>Journal of Eye Movement Research</dc:source>
	<dc:date>2025-08-04</dc:date>

	<prism:publicationName>Journal of Eye Movement Research</prism:publicationName>
	<prism:publicationDate>2025-08-04</prism:publicationDate>
	<prism:volume>18</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/jemr18040033</prism:doi>
	<prism:url>https://www.mdpi.com/1995-8692/18/4/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
